From 0759e170a7faafca0cee7b9b8929b0c6e15bcf77 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 7 Mar 2019 01:38:03 -0600 Subject: [PATCH 001/481] High/low balance separation See #685 for reasoning --- specs/core/0_beacon-chain.md | 95 +++++++++++++++++++++++++++--------- 1 file changed, 72 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index bd709218a..3c2d90b69 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -64,6 +64,10 @@ - [`get_epoch_start_slot`](#get_epoch_start_slot) - [`is_active_validator`](#is_active_validator) - [`get_active_validator_indices`](#get_active_validator_indices) + - [`get_balance`](#get_balance) + - [`set_balance`](#set_balance) + - [`increase_balance`](#increase_balance) + - [`decrease_balance`](#decrease_balance) - [`get_permuted_index`](#get_permuted_index) - [`split`](#split) - [`get_epoch_committee_count`](#get_epoch_committee_count) @@ -205,10 +209,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | Name | Value | Unit | | - | - | :-: | -| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | +| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 2,000,000,000) | Gwei | | `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | -| `FORK_CHOICE_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | +| `HIGH_BALANCE_INCREMENT` | `10 ** 9` (= 1,000,000,000) | Gwei | ### Initial values @@ -516,7 +520,7 @@ The following data structures are defined as [SimpleSerialize (SSZ)](https://git # Validator registry 'validator_registry': [Validator], - 'validator_balances': ['uint64'], + 'low_balances': ['uint32'], 'validator_registry_update_epoch': 'uint64', # Randomness and committees @@ -570,6 +574,8 @@ The following data structures are defined as [SimpleSerialize (SSZ)](https://git 'initiated_exit': 'bool', # Was the validator slashed 'slashed': 'bool', + # Rounded balance + 'high_balance': 'uint32' } ``` @@ -749,6 +755,45 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] ``` +### `get_balance` + +```python +def get_balance(state: BeaconState, index: int) -> int: + return ( + state.validator_registry[index].high_balance * HIGH_BALANCE_INCREMENT + + state.low_balances[index] + ) +``` +#### `set_balance` + +````python +def set_balance(state: BeaconState, index: int, new_balance: int) -> None: + validator = state.validator_registry[index] + HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 + if ( + validator.rounded_balance * HIGH_BALANCE_INCREMENT > new_balance or + validator.rounded_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance + ): + validator.rounded_balance = new_balance // HIGH_BALANCE_INCREMENT + state.validator_fractional_balances[index] = ( + new_balance - validator.rounded_balance * HIGH_BALANCE_INCREMENT + ) +```` + +#### `increase_balance` + +````python +def increase_balance(state: BeaconState, index: int, delta: int) -> None: + set_balance(state, index, get_balance(state, index) + delta) +```` + +#### `decrease_balance` + +````python +def decrease_balance(state: BeaconState, index: int, delta: int) -> None: + set_balance(state, index, get_balance(state, index) - delta) +```` + ### `get_permuted_index` ```python @@ -1105,7 +1150,7 @@ def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: """ Return the 
effective balance (also known as "balance at stake") for a validator with the given ``index``. """ - return min(state.validator_balances[index], MAX_DEPOSIT_AMOUNT) + return min(get_balance(state, index), MAX_DEPOSIT_AMOUNT) ``` ### `get_total_balance` @@ -1351,17 +1396,18 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: withdrawable_epoch=FAR_FUTURE_EPOCH, initiated_exit=False, slashed=False, + high_balance=0 ) # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) - state.validator_balances.append(amount) + state.low_balances.append(0) + set_balance(state, len(state.validator_registry)-1, amount) else: # Increase balance by deposit amount index = validator_pubkeys.index(pubkey) assert state.validator_registry[index].withdrawal_credentials == withdrawal_credentials - - state.validator_balances[index] += amount + increase_balance(state, index, amount) ``` ### Routines for updating validator status @@ -1426,8 +1472,8 @@ def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: whistleblower_index = get_beacon_proposer_index(state, state.slot) whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT - state.validator_balances[whistleblower_index] += whistleblower_reward - state.validator_balances[index] -= whistleblower_reward + increase_balance(state, whistleblower_index, whistleblower_reward) + decrease_balance(state, index, whistleblower_reward) validator.slashed = True validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH ``` @@ -1545,7 +1591,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Validator registry validator_registry=[], - validator_balances=[], + low_balances=[], validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees @@ -1657,9 +1703,12 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) for validator_index in active_validator_indices ] + # Use the rounded-balance-with-hysteresis supplied by the protocol for fork + # choice voting. This reduces the number of recomputations that need to be + # made for optimized implementations that precompute and save data def get_vote_count(block: BeaconBlock) -> int: return sum( - get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT + start_state.validator_registry[validator_index].high_balance for validator_index, target in attestation_targets if get_ancestor(store, target, block.slot) == block ) @@ -1956,12 +2005,12 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: Note that this function mutates ``state``. 
""" # Verify the amount and fee aren't individually too big (for anti-overflow purposes) - assert state.validator_balances[transfer.sender] >= max(transfer.amount, transfer.fee) + assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) # Verify that we have enough ETH to send, and that after the transfer the balance will be either # exactly zero or at least MIN_DEPOSIT_AMOUNT assert ( - state.validator_balances[transfer.sender] == transfer.amount + transfer.fee or - state.validator_balances[transfer.sender] >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT + get_balance(state, transfer.sender) == transfer.amount + transfer.fee or + get_balance(state, transfer.sender) >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT ) # A transfer is valid in only one slot assert state.slot == transfer.slot @@ -1983,9 +2032,9 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) ) # Process the transfer - state.validator_balances[transfer.sender] -= transfer.amount + transfer.fee - state.validator_balances[transfer.recipient] += transfer.amount - state.validator_balances[get_beacon_proposer_index(state, state.slot)] += transfer.fee + decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) + increase_balance(state, transfer.recipient, transfer.amount) + increase_balance(state, get_beacon_proposer_index(state, state.slot), transfer.fee) ``` ### Per-epoch processing @@ -2320,10 +2369,10 @@ def apply_rewards(state: BeaconState) -> None: deltas1 = get_justification_and_finalization_deltas(state) deltas2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): - state.validator_balances[i] = max( + set_balance(state, i, max( 0, - state.validator_balances[i] + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] - ) + get_balance(state, i) + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] + )) ``` #### Ejections @@ -2337,7 +2386,7 @@ def process_ejections(state: BeaconState) -> None: and eject active validators with balance below ``EJECTION_BALANCE``. 
""" for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): - if state.validator_balances[index] < EJECTION_BALANCE: + if get_balance(state, index) < EJECTION_BALANCE: exit_validator(state, index) ``` @@ -2380,7 +2429,7 @@ def update_validator_registry(state: BeaconState) -> None: # Activate validators within the allowable balance churn balance_churn = 0 for index, validator in enumerate(state.validator_registry): - if validator.activation_epoch == FAR_FUTURE_EPOCH and state.validator_balances[index] >= MAX_DEPOSIT_AMOUNT: + if validator.activation_epoch == FAR_FUTURE_EPOCH and get_balance(state, index) >= MAX_DEPOSIT_AMOUNT: # Check the balance churn would be within the allowance balance_churn += get_effective_balance(state, index) if balance_churn > max_balance_churn: @@ -2461,7 +2510,7 @@ def process_slashings(state: BeaconState) -> None: get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance, get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT ) - state.validator_balances[index] -= penalty + decrease_balance(state, index, penalty) ``` ```python From be4b912373b9ee89851e217e8f500f444ef0e1fa Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 7 Mar 2019 04:02:53 -0600 Subject: [PATCH 002/481] Added underflow checking to decrease_balance --- specs/core/0_beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3c2d90b69..b0b3dbb2a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -791,7 +791,8 @@ def increase_balance(state: BeaconState, index: int, delta: int) -> None: ````python def decrease_balance(state: BeaconState, index: int, delta: int) -> None: - set_balance(state, index, get_balance(state, index) - delta) + cur_balance = get_balance(state, index) + set_balance(state, index, cur_balance - delta if cur_balance >= delta else 0) ```` ### `get_permuted_index` From f9a07f7653890fd74c6c023182ccb56004b5579d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 7 Mar 2019 04:04:05 -0600 Subject: [PATCH 003/481] Fixed MIN_DEPOSIT_AMOUNT --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b0b3dbb2a..c548dbe14 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -209,10 +209,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| Name | Value | Unit | | - | - | :-: | -| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 2,000,000,000) | Gwei | +| `MIN_DEPOSIT_AMOUNT` | `10**9` (= 1,000,000,000) | Gwei | | `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | -| `HIGH_BALANCE_INCREMENT` | `10 ** 9` (= 1,000,000,000) | Gwei | +| `HIGH_BALANCE_INCREMENT` | `10**9` (= 1,000,000,000) | Gwei | ### Initial values From bf6bdbb0210ee8020cac21b3f731178caad03ab7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 11 Mar 2019 12:38:11 -0600 Subject: [PATCH 004/481] cleanup minor var errors --- specs/core/0_beacon-chain.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f5fa2128a..ec9eedb51 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -768,12 +768,12 @@ def set_balance(state: BeaconState, index: int, new_balance: int) -> None: validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if ( - validator.rounded_balance * HIGH_BALANCE_INCREMENT > new_balance or - validator.rounded_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance + validator.high_balance * HIGH_BALANCE_INCREMENT > new_balance or + validator.high_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance ): - validator.rounded_balance = new_balance // HIGH_BALANCE_INCREMENT - state.validator_fractional_balances[index] = ( - new_balance - validator.rounded_balance * HIGH_BALANCE_INCREMENT + validator.high_balance = new_balance // HIGH_BALANCE_INCREMENT + state.low_balances[index] = ( + new_balance - validator.high_balance * HIGH_BALANCE_INCREMENT ) ```` From 3459ea0838da4b82b5f0e9d2fd2662e2c76529a2 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 11 Mar 2019 22:07:34 +0000 Subject: [PATCH 005/481] Check proposer is not slashed --- specs/core/0_beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ceca50962..1bbab78dc 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2247,8 +2247,10 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: assert block.previous_block_root == hash_tree_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) - # Verify proposer signature + # Verify proposer proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + assert not proposer.slashed + # Verify proposer signature assert bls_verify( pubkey=proposer.pubkey, message_hash=signed_root(block), From 4410a55c4d77921effbfecef131f9327a6633887 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 11 Mar 2019 23:30:08 -0500 Subject: [PATCH 006/481] Mandatory deposits Resolves #675 point 5. 
--- specs/core/0_beacon-chain.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ceca50962..4aaf09b4b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -315,6 +315,8 @@ The types are defined topologically to aid in facilitating an executable version { # Root of the deposit tree 'deposit_root': 'bytes32', + # Total number of deposits + 'deposit_count': 'uint64', # Block hash 'block_hash': 'bytes32', } @@ -1456,6 +1458,7 @@ When sufficiently many full deposits have been made the deposit contract emits t * `genesis_time` equals `time` in the `Eth2Genesis` log * `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log +* `latest_eth1_data.deposit_count` equals `deposit_count` in the `Eth2Genesis` log * `latest_eth1_data.block_hash` equals the hash of the block that included the log * `genesis_validator_deposits` is a list of `Deposit` objects built according to the `Deposit` logs up to the deposit that triggered the `Eth2Genesis` log, processed in the order in which they were emitted (oldest to newest) @@ -1479,6 +1482,7 @@ When enough full deposits have been made to the deposit contract, an `Eth2Genesi * Let `genesis_time` be the timestamp specified in the `Eth2Genesis` log. * Let `genesis_eth1_data` be the `Eth1Data` object where: * `genesis_eth1_data.deposit_root` is the `deposit_root` contained in the `Eth2Genesis` log. + * `genesis_eth1_data.deposit_count` is the `deposit_count` contained in the `Eth2Genesis` log. * `genesis_eth1_data.block_hash` is the hash of the Ethereum 1.0 block that emitted the `Eth2Genesis` log. * Let `genesis_state = get_genesis_beacon_state(genesis_validator_deposits, genesis_time, genesis_eth1_data)`. * Let `genesis_block = get_empty_block()`. @@ -1497,6 +1501,7 @@ def get_empty_block() -> BeaconBlock: randao_reveal=EMPTY_SIGNATURE, eth1_data=Eth1Data( deposit_root=ZERO_HASH, + deposit_count=0, block_hash=ZERO_HASH, ), proposer_slashings=[], @@ -2443,7 +2448,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ##### Deposits -Verify that `len(block.body.deposits) <= MAX_DEPOSITS`. +Verify that `len(block.body.deposits) <= MAX_DEPOSITS`. If `state.latest_eth1_data.deposit_count > state.deposit_index`, verify that `len(block.body.deposits) >= 1`. For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`. From 5266bbd378449d58746b505ddbe0b097fa3737e1 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Mon, 11 Mar 2019 23:38:22 -0500 Subject: [PATCH 007/481] Honest validator doc update for mandatory deposits Co-requisite with #758. --- specs/validator/0_beacon-chain-validator.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 0c95fb446..e1ccc9b31 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -182,15 +182,15 @@ epoch_signature = bls_sign( * Let `D` be the set of `Eth1DataVote` objects `vote` in `state.eth1_data_votes` where: * `vote.eth1_data.block_hash` is the hash of an eth1.0 block that is (i) part of the canonical chain, (ii) >= `ETH1_FOLLOW_DISTANCE` blocks behind the head, and (iii) newer than `state.latest_eth1_data.block_data`. + * `vote.eth1_data.deposit_count` is the deposit count of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. 
* `vote.eth1_data.deposit_root` is the deposit root of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`. * If `D` is empty: * Let `block_hash` be the block hash of the `ETH1_FOLLOW_DISTANCE`'th ancestor of the head of the canonical eth1.0 chain. - * Let `deposit_root` be the deposit root of the eth1.0 deposit contract in the post-state of the block referenced by `block_hash` + * Let `deposit_root` and `deposit_count` be the deposit root and deposit count of the eth1.0 deposit contract in the post-state of the block referenced by `block_hash` + * Let `best_vote_data = Eth1Data(block_hash=block_hash, deposit_root=deposit_root, deposit_count=deposit_count)`. * If `D` is nonempty: - * Let `best_vote` be the member of `D` that has the highest `vote.vote_count`, breaking ties by favoring block hashes with higher associated block height. - * Let `block_hash = best_vote.eth1_data.block_hash`. - * Let `deposit_root = best_vote.eth1_data.deposit_root`. -* Set `block.eth1_data = Eth1Data(deposit_root=deposit_root, block_hash=block_hash)`. + * Let `best_vote_data` be the `eth1_data` of the member of `D` that has the highest `vote.vote_count`, breaking ties by favoring block hashes with higher associated block height. +* Set `block.eth1_data = best_vote_data`. ##### Signature From a7544864d5de8eaa27f4630d2740f4acc8383d99 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 12 Mar 2019 10:02:52 +0000 Subject: [PATCH 008/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ec9eedb51..7d59a9e6d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -203,10 +203,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| Name | Value | Unit | | - | - | :-: | -| `MIN_DEPOSIT_AMOUNT` | `10**9` (= 1,000,000,000) | Gwei | +| `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | | `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | -| `HIGH_BALANCE_INCREMENT` | `10**9` (= 1,000,000,000) | Gwei | +| `HIGH_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | ### Initial values @@ -440,7 +440,7 @@ The types are defined topologically to aid in facilitating an executable version # Was the validator slashed 'slashed': 'bool', # Rounded balance - 'high_balance': 'uint32' + 'high_balance': 'uint64' } ``` @@ -756,25 +756,17 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L ```python def get_balance(state: BeaconState, index: int) -> int: - return ( - state.validator_registry[index].high_balance * HIGH_BALANCE_INCREMENT + - state.low_balances[index] - ) + return state.validator_registry[index].high_balance + state.low_balances[index] ``` #### `set_balance` ````python -def set_balance(state: BeaconState, index: int, new_balance: int) -> None: +def set_balance(state: BeaconState, index: int, balance: int) -> None: validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 - if ( - validator.high_balance * HIGH_BALANCE_INCREMENT > new_balance or - validator.high_balance * HIGH_BALANCE_INCREMENT + HALF_INCREMENT * 3 < new_balance - ): - validator.high_balance = new_balance // HIGH_BALANCE_INCREMENT - state.low_balances[index] = ( - new_balance - validator.high_balance * HIGH_BALANCE_INCREMENT - ) + if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: + validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT + state.low_balances[index] = balance - validator.high_balance ```` #### `increase_balance` From 6c359340607af3b6680268a75489780896225a32 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 12 Mar 2019 10:34:24 +0000 Subject: [PATCH 009/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4aaf09b4b..8edc3f232 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2448,7 +2448,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ##### Deposits -Verify that `len(block.body.deposits) <= MAX_DEPOSITS`. If `state.latest_eth1_data.deposit_count > state.deposit_index`, verify that `len(block.body.deposits) >= 1`. +Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`. 
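In code form, the deposit-count verification above reads roughly as follows (a sketch only; field names as in the containers above, and `MAX_DEPOSITS` is the existing per-block operation limit from the spec):

```python
# Sketch of the deposit-inclusion rule above: a block must include every pending
# deposit known from the latest eth1 data, capped at MAX_DEPOSITS per block.
def verify_block_deposit_count(state, block) -> None:
    expected = min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)
    assert len(block.body.deposits) == expected
```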
From 0a349f8bdc31d08f2c6f4a5b8e98427845c1716e Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 12 Mar 2019 15:58:31 +0000 Subject: [PATCH 010/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 7d59a9e6d..63737962d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -601,7 +601,7 @@ The types are defined topologically to aid in facilitating an executable version # Validator registry 'validator_registry': [Validator], - 'low_balances': ['uint32'], + 'balances': ['uint64'], 'validator_registry_update_epoch': 'uint64', # Randomness and committees @@ -756,7 +756,7 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L ```python def get_balance(state: BeaconState, index: int) -> int: - return state.validator_registry[index].high_balance + state.low_balances[index] + return state.balances[index] ``` #### `set_balance` @@ -766,7 +766,7 @@ def set_balance(state: BeaconState, index: int, balance: int) -> None: HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT - state.low_balances[index] = balance - validator.high_balance + state.balances[index] = balance ```` #### `increase_balance` @@ -1377,7 +1377,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) - state.low_balances.append(0) + state.balances.append(0) set_balance(state, len(state.validator_registry)-1, amount) else: # Increase balance by deposit amount @@ -1567,7 +1567,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Validator registry validator_registry=[], - low_balances=[], + balances=[], validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees From e4a1ef16e6a42424d7f617d342183c2d29ba9b56 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 12 Mar 2019 13:46:58 -0700 Subject: [PATCH 011/481] Add networking specs --- specs/networking/messaging.md | 41 ++++ specs/networking/node-identification.md | 32 +++ specs/networking/rpc-interface.md | 246 ++++++++++++++++++++++++ 3 files changed, 319 insertions(+) create mode 100644 specs/networking/messaging.md create mode 100644 specs/networking/node-identification.md create mode 100644 specs/networking/rpc-interface.md diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md new file mode 100644 index 000000000..e88116f46 --- /dev/null +++ b/specs/networking/messaging.md @@ -0,0 +1,41 @@ +ETH 2.0 Networking Spec - Messaging +=== + +# Abstract + +This specification describes how individual Ethereum 2.0 messages are represented on the wire. + +The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL”, NOT", “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and “OPTIONAL” in this document are to be interpreted as described in RFC 2119. + +# Motivation + +This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the ETH 2.0 specification evolves. + +# Specification + +## Message Structure + +An ETH 2.0 message consists of a single byte representing the message version followed by the encoded, potentially compressed body. 
We separate the message's version from the version included in the `libp2p` protocol path in order to allow encoding and compression schemes to be updated independently of the `libp2p` protocols themselves. + +It is unlikely that more than 255 message versions will need to be supported, so a single byte should suffice. + +Visually, a message looks like this: + +``` ++--------------------------+ +| version byte | ++--------------------------+ +| | +| body | +| | ++--------------------------+ +``` + +Clients MUST ignore messages with mal-formed bodies. The `version` byte MUST be one of the below values: + +## Version Byte Values + +### `0x01` + +- **Encoding Scheme:** SSZ +- **Compression Scheme:** Snappy diff --git a/specs/networking/node-identification.md b/specs/networking/node-identification.md new file mode 100644 index 000000000..27c1ebf9d --- /dev/null +++ b/specs/networking/node-identification.md @@ -0,0 +1,32 @@ +ETH 2.0 Networking Spec - Node Identification +=== + +# Abstract + +This specification describes how Ethereum 2.0 nodes identify and address each other on the network. + +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. + +# Specification + +Clients use Ethereum Node Records (as described in [EIP-778](http://eips.ethereum.org/EIPS/eip-778)) to discover one another. Each ENR includes, among other things, the following keys: + +- The node's IP. +- The node's TCP port. +- The node's public key. + +For clients to be addressable, their ENR responses MUST contain all of the above keys. Client MUST verify the signature of any received ENRs, and disconnect from peers whose ENR signatures are invalid. Each node's public key MUST be unique. + +The keys above are enough to construct a [multiaddr](https://github.com/multiformats/multiaddr) for use with the rest of the `libp2p` stack. + +It is RECOMMENDED that clients set their TCP port to the default of `9000`. + +## Peer ID Generation + +The `libp2p` networking stack identifies peers via a "peer ID." Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key. `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. + +# See Also + +- [multiaddr](https://github.com/multiformats/multiaddr) +- [multihash](https://multiformats.io/multihash/) +- [go-libp2p-crypto](https://github.com/libp2p/go-libp2p-crypto) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md new file mode 100644 index 000000000..fdc9a11b3 --- /dev/null +++ b/specs/networking/rpc-interface.md @@ -0,0 +1,246 @@ +ETH 2.0 Networking Spec - RPC Interface +=== + +# Abstract + +The Ethereum 2.0 networking stack uses two modes of communication: a broadcast protocol that gossips information to interested parties via GossipSub, and an RPC protocol that retrieves information from specific clients. This specification defines the RPC protocol. + +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. + +# Dependencies + +This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification), and [Beacon Chain](../core/0_beacon-chain.md) specifications. 
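For reference, the message structure defined in the Messaging specification above amounts to a one-byte version prefix on an otherwise opaque body. A minimal framing sketch (illustrative only; SSZ encoding and Snappy compression of the body are assumed to happen elsewhere):

```python
from typing import Tuple

VERSION_SSZ_SNAPPY = 0x01  # version byte for SSZ-encoded, Snappy-compressed bodies

def frame_message(body: bytes, version: int = VERSION_SSZ_SNAPPY) -> bytes:
    # Prepend the single version byte to the already-encoded body.
    return bytes([version]) + body

def parse_message(wire: bytes) -> Tuple[int, bytes]:
    # Split a received message back into (version byte, body).
    if not wire:
        raise ValueError("empty message")
    return wire[0], wire[1:]
```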
+ +# Specification + +## Message Schemas + +Message body schemas are notated like this: + +``` +( + field_name_1: type + field_name_2: type +) +``` + +SSZ serialization is field-order dependent. Therefore, fields MUST be encoded and decoded according to the order described in this document. The encoded values of each field are concatenated to form the final encoded message body. Embedded structs are serialized as Containers unless otherwise noted. + +All referenced data structures can be found in the [0-beacon-chain](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#data-structures) specification. + +## `libp2p` Protocol Names + +A "Protocol Name" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. A client's supported protocol paths are negotiated by the `libp2p` stack at connection time; as such they are not part of individual message bodies. + +## RPC-Over-`libp2p` + +To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: + +``` +( + id: uint64 + method_id: uint16 + body: Request +) +``` + +and their corresponding responses are wrapped in a "response" structure: + +``` +( + id: uint64 + result: Response +) +``` + +If an error occurs, a variant of the response structure is returned: + +``` +( + id: uint64 + error: ( + code: uint16 + data: bytes + ) +) +``` + +The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically: + +1. The `id` member is REQUIRED. +2. The `id` member in the response MUST be the same as the value of the `id` in the request. +3. The `method_id` member is REQUIRED. +4. The `result` member is required on success, and MUST NOT exist if there was an error. +5. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error. + +Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. + +The "method ID" fields in the below messages refer to the `method` field in the request structure above. + +The first 1,000 values in `error.code` are reserved for system use. The following error codes are predefined: + +1. `0`: Parse error. +2. `10`: Invalid request. +3. `20`: Method not found. +4. `30`: Server error. + +## Messages + +### Hello + +**Method ID:** `0` + +**Body**: + +``` +( + network_id: uint8 + latest_finalized_root: bytes32 + latest_finalized_epoch: uint64 + best_root: bytes32 + best_slot: uint64 +) +``` + +Clients exchange `hello` messages upon connection, forming a two-phase handshake. The first message the initiating client sends MUST be the `hello` message. In response, the receiving client MUST respond with its own `hello` message. + +Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions: + +1. If `network_id` belongs to a different chain, since the client definitionally cannot sync with this client. +2. If the `latest_finalized_root` shared by the peer is not in the client's chain at the expected epoch. 
For example, if Peer 1 in the diagram below has `(root, epoch)` of `(A, 5)` and Peer 2 has `(B, 3)`, Peer 1 would disconnect because it knows that `B` is not the root in their chain at epoch 3: + +``` + Root A + + +---+ + |xxx| +----+ Epoch 5 + +-+-+ + ^ + | + +-+-+ + | | +----+ Epoch 4 + +-+-+ +Root B ^ + | ++---+ +-+-+ +|xxx+<---+--->+ | +----+ Epoch 3 ++---+ | +---+ + | + +-+-+ + | | +-----------+ Epoch 2 + +-+-+ + ^ + | + +-+-+ + | | +-----------+ Epoch 1 + +---+ +``` + +Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD send beacon block roots to its counterparty via `beacon_block_roots` (i.e., RPC method `10`). + +### Goodbye + +**Method ID:** `1` + +**Body:** + +``` +( + reason: uint64 +) +``` + +Client MAY send `goodbye` messages upon disconnection. The reason field MUST be one of the following values: + +- `1`: Client shut down. +- `2`: Irrelevant network. +- `3`: Irrelevant shard. + +### Provide Beacon Block Roots + +**Method ID:** `10` + +**Body:** + +``` +# BlockRootSlot +( + block_root: HashTreeRoot + slot: uint64 +) + +( + roots: []BlockRootSlot +) +``` + +Send a list of block roots and slots to the peer. + +### Beacon Block Headers + +**Method ID:** `11` + +**Request Body** + +``` +( + start_root: HashTreeRoot + start_slot: uint64 + max_headers: uint64 + skip_slots: uint64 +) +``` + +**Response Body:** + +``` +( + headers: []BlockHeader +) +``` + +Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is undefined for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were undefined in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further undefined, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. + +The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Client could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. + +### Beacon Block Bodies + +**Method ID:** `12` + +**Request Body:** + +``` +( + block_roots: []HashTreeRoot +) +``` + +**Response Body:** + +``` +( + block_bodies: []BeaconBlockBody +) +``` + +Requests the `block_bodies` associated with the provided `block_roots` from the peer. Responses MUST return `block_roots` in the order provided in the request. If the receiver does not have a particular `block_root`, it must return a zero-value `block_body` (i.e., a `block_body` container with all zero fields). + +### Beacon Chain State + +**Note:** This section is preliminary, pending the definition of the data structures to be transferred over the wire during fast sync operations. + +**Method ID:** `13` + +**Request Body:** + +``` +( + hashes: []HashTreeRoot +) +``` + +**Response Body:** TBD + +Requests contain the hashes of Merkle tree nodes that when merkelized yield the block's `state_root`. 
+ +The response will contain the values that, when hashed, yield the hashes inside the request body. From 3371dcc23b1f412e4b93466436424ffaee96a8ae Mon Sep 17 00:00:00 2001 From: Vitalik Buterin Date: Wed, 13 Mar 2019 02:54:27 -0500 Subject: [PATCH 012/481] Added light client related files --- specs/light_client/merkle_proofs.md | 134 ++++++++++++++++++++++ specs/light_client/sync_protocol.md | 169 ++++++++++++++++++++++++++++ 2 files changed, 303 insertions(+) create mode 100644 specs/light_client/merkle_proofs.md create mode 100644 specs/light_client/sync_protocol.md diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md new file mode 100644 index 000000000..cf4dad2e3 --- /dev/null +++ b/specs/light_client/merkle_proofs.md @@ -0,0 +1,134 @@ +### Generalized Merkle tree index + +In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows: + +``` + 1 + 2 3 +4 5 6 7 + ... +``` + +Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function: + +```python +def merkle_tree(leaves): + o = [0] * len(leaves) + leaves + for i in range(len(leaves)-1, 0, -1): + o[i] = hash(o[i*2] + o[i*2+1]) + return o +``` + +We will define Merkle proofs in terms of generalized indices. + +### SSZ object to index + +We can describe the hash tree of any SSZ object, rooted in `hash_tree_root(object)`, as a binary Merkle tree whose depth may vary. For example, an object `{x: bytes32, y: List[uint64]}` would look as follows: + +``` + root + / \ + x y_root + / \ +y_data_root len(y) + / \ + /\ /\ + ....... +``` + +We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo[5]`. We'll describe paths as lists: in these three cases they are `["x"]`, `["y", "len"]` and `["y", 5]` respectively. We can now define a function `get_generalized_indices(object: Any, path: List[str OR int], root=1: int) -> int` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. 
+ +```python +def get_generalized_indices(obj: Any, path: List[str or int], root=1) -> List[int]: + if len(path) == 0: + return [root] + elif isinstance(obj, StaticList): + items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 + new_root = root * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk + return get_generalized_indices(obj[path[0]], path[1:], new_root) + elif isinstance(obj, DynamicList) and path[0] == "len": + return [root * 2 + 1] + elif isinstance(obj, DynamicList) and isinstance(path[0], int): + assert path[0] < len(obj) + items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 + new_root = root * 2 * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk + return [root *2 + 1] + get_generalized_indices(obj[path[0]], path[1:], new_root) + elif hasattr(obj, "fields"): + index = list(fields.keys()).index(path[0]) + new_root = root * next_power_of_2(len(fields)) + index + return get_generalized_indices(getattr(obj, path[0]), path[1:], new_root) + else: + raise Exception("Unknown type / path") +``` + +### Merkle multiproofs + +We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (ie. generalized indices 8, 9, 14): + +``` + . + . . + . * * . +x x . . . . x * +``` + +. are unused nodes, * are used nodes, x are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, only one node more than would be required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`. + +Here is code for creating and verifying a multiproof. First a helper: + +```python +def log2(x): + return 0 if x == 1 else 1 + log2(x//2) +``` + +First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: + +```python +def get_proof_indices(tree_indices: List[int]) -> List[int]: + # Get all indices touched by the proof + maximal_indices = set({}) + for i in tree_indices: + x = i + while x > 1: + maximal_indices.add(x ^ 1) + x //= 2 + maximal_indices = tree_indices + sorted(list(maximal_indices))[::-1] + # Get indices that cannot be recalculated from earlier indices + redundant_indices = set({}) + proof = [] + for index in maximal_indices: + if index not in redundant_indices: + proof.append(index) + while index > 1: + redundant_indices.add(index) + if (index ^ 1) not in redundant_indices: + break + index //= 2 + return [i for i in proof if i not in tree_indices] +```` + +Generating a proof is simply a matter of taking the node of the SSZ hash tree with the union of the given generalized indices for each index given by `get_proof_indices`, and outputting the list of nodes in the same order. 
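For concreteness, a minimal sketch of that step, assuming `tree` is the list returned by `merkle_tree()` above (so that `tree[i]` is the node at generalized index `i`):

```python
# Illustrative sketch: the multiproof is just the auxiliary nodes selected by
# get_proof_indices, read out of the tree in the same order.
def get_multi_proof(tree, indices):
    return [tree[i] for i in get_proof_indices(indices)]
```

The function below then verifies such a proof by recomputing parent nodes from the supplied leaves and proof items and checking that the reconstructed root matches.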
+ +```python +def verify_multi_proof(root, indices, leaves, proof): + tree = {} + for index, leaf in zip(indices, leaves): + tree[index] = leaf + for index, proofitem in zip(get_proof_indices(indices), proof): + tree[index] = proofitem + indexqueue = sorted(tree.keys())[:-1] + i = 0 + while i < len(indexqueue): + index = indexqueue[i] + if index >= 2 and index^1 in tree: + tree[index//2] = hash(tree[index - index%2] + tree[index - index%2 + 1]) + indexqueue.append(index//2) + i += 1 + return (indices == []) or (1 in tree and tree[1] == root) +``` + +#### Proofs for execution + +We define `MerklePartial(f, arg1, arg2...)` as being a list of Merkle multiproofs of the sets of nodes in the hash trees of the SSZ objects that are needed to authenticate the values needed to compute some function `f(arg1, arg2...)`. An individual Merkle multiproof is given as a dynamic sized list of `bytes32` values, a `MerklePartial` is a fixed-size list of objects `{proof: ["bytes32"], value: "bytes32"}`, one for each `arg` to `f` (if some `arg` is a base type, then the multiproof is empty). + +Ideally, any function which accepts an SSZ object should also be able to accept a `MerklePartial` object as a substitute. diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md new file mode 100644 index 000000000..2a70dbb31 --- /dev/null +++ b/specs/light_client/sync_protocol.md @@ -0,0 +1,169 @@ +# Beacon chain light client syncing + +One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. + +### Preliminaries + +We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (eg. `BeaconBlockHeader` is a summary of `BeaconBlock`). + +We define two expansions: + +* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState` +* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])` + +Note that there is now a new way to compute `get_active_validator_indices`: + +```python +def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex]: + return state.latest_active_indices[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] +``` + +Note that it takes `state` instead of `state.validator_registry` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments. + +A `MerklePartial(f, *args)` is an object that contains a minimal Merkle proof needed to compute `f(*args)`. 
A `MerklePartial` can be used in place of a regular SSZ object, though a computation would return an error if it attempts to access part of the object that is not contained in the proof. + +We add a data type `PeriodData` and four helpers: + +```python +{ + 'validator_count': 'uint64', + 'seed': 'bytes32', + 'committee': [Validator] +} +``` + +```python +def get_earlier_start_epoch(slot: Slot) -> int: + return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD * 2 +def get_later_start_epoch(slot: Slot) -> int: + return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD + +def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: + period_start = get_earlier_start_epoch(header.slot) + validator_count = len(get_active_validator_indices(state, period_start)) + committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 + indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) + return PeriodData( + validator_count, + generate_seed(block.state, period_start), + [block.state.validator_registry[i] for i in indices] + ) + +def get_later_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: + period_start = get_later_start_epoch(header.slot) + validator_count = len(get_active_validator_indices(state, period_start)) + committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 + indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) + return PeriodData( + validator_count, + generate_seed(block.state, period_start), + [block.state.validator_registry[i] for i in indices] + ) +``` + +### Light client state + +A light client will keep track of: + +* A random `shard_id` in `[0...SHARD_COUNT-1]` (selected once and retained forever) +* A block header that they consider to be finalized (`finalized_header`) and do not expect to revert. +* `later_period_data = get_maximal_later_committee(finalized_header, shard_id)` +* `earlier_period_data = get_maximal_earlier_committee(finalized_header, shard_id)` + +We use the struct `validator_memory` to keep track of these variables. + +### Updating the shuffled committee + +If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_maximal_later_committee, validator_memory.finalized_header, shard_id)`. It can then compute: + +```python +earlier_period_data = later_period_data +later_period_data = get_later_period_data(new_committee_proof, finalized_header, shard_id) +``` + +The maximum size of a proof is `128 * ((22-7) * 32 + 110) = 75520` bytes for validator records and `(22-7) * 32 + 128 * 8 = 1504` for the active index proof (much smaller because the relevant active indices are all beside each other in the Merkle tree). This needs to be done once per `PERSISTENT_COMMITTEE_PERIOD` epochs (2048 epochs / 9 days), or ~38 bytes per epoch. 
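As a quick arithmetic check of the byte counts quoted above:

```python
# Sanity-check the proof-size figures quoted above.
validator_proof_bytes = 128 * ((22 - 7) * 32 + 110)
active_index_proof_bytes = (22 - 7) * 32 + 128 * 8
assert validator_proof_bytes == 75520
assert active_index_proof_bytes == 1504
# Amortized over one PERSISTENT_COMMITTEE_PERIOD of 2048 epochs:
print((validator_proof_bytes + active_index_proof_bytes) / 2048)  # ~37.6, i.e. the ~38 bytes per epoch above
```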
+ +### Computing the current committee + +Here is a helper to compute the committee at a slot given the maximal earlier and later committees: + +```python +def compute_committee(header: BeaconBlockHeader, + validator_memory: ValidatorMemory): + + earlier_validator_count = validator_memory.earlier_period_data.validator_count + later_validator_count = validator_memory.later_period_data.validator_count + earlier_committee = validator_memory.earlier_period_data.committee + later_committee = validator_memory.later_period_data.committee + earlier_start_epoch = get_earlier_start_epoch(header.slot) + later_start_epoch = get_later_start_epoch(header.slot) + epoch = slot_to_epoch(header.slot) + + actual_committee_count = max( + earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + ) + 1 + + def get_offset(count, end:bool): + return get_split_offset(count, + SHARD_COUNT * committee_count, + validator_memory.shard_id * committee_count + (1 if end else 0)) + + actual_earlier_committee = maximal_earlier_committee[ + 0:get_offset(earlier_validator_count, True) - get_offset(earlier_validator_count, False) + ] + actual_later_committee = maximal_later_committee[ + 0:get_offset(later_validator_count, True) - get_offset(later_validator_count, False) + ] + def get_switchover_epoch(index): + return ( + bytes_to_int(hash(validator_memory.earlier_period_data.seed + bytes3(index))[0:8]) % + PERSISTENT_COMMITTEE_PERIOD + ) + # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from + # later committee; return a sorted list of the union of the two, deduplicated + return sorted(list(set( + [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + + [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] + ))) + +``` + +Note that this method makes use of the fact that the committee for any given shard always starts and ends at the same validator index independently of the committee count (this is because the validator set is split into `SHARD_COUNT * committee_count` slices but the first slice of a shard is a multiple `committee_count * i`, so the start of the slice is `n * committee_count * i // (SHARD_COUNT * committee_count) = n * i // SHARD_COUNT`, using the slightly nontrivial algebraic identity `(x * a) // ab == x // b`). 
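A quick numeric spot-check of that identity (the common factor `a` cancels before the floor division is applied):

```python
# Spot-check of (x * a) // (a * b) == x // b for positive integers a and b.
for x in range(64):
    for a in range(1, 8):
        for b in range(1, 8):
            assert (x * a) // (a * b) == x // b
```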
### Verifying blocks

If a client wants to update its `finalized_header` it asks the network for a `BlockValidityProof`, which is simply:

```python
{
    'header': BlockHeader,
    'shard_aggregate_signature': 'bytes96',
    'shard_bitfield': 'bytes',
    'shard_parent_block': ShardBlock
}
```

The verification procedure is as follows:

```python
def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: ValidatorMemory) -> bool:
    assert proof.shard_parent_block.beacon_chain_ref == hash_tree_root(proof.header)
    committee = compute_committee(proof.header, validator_memory)
    # Verify that we have >=50% support
    support_balance = sum([c.high_balance for i, c in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True])
    total_balance = sum([c.high_balance for i, c in enumerate(committee)])
    assert support_balance * 2 > total_balance
    # Verify shard attestations
    group_public_key = bls_aggregate_pubkeys([
        v.pubkey for i, v in enumerate(committee) if
        get_bitfield_bit(proof.shard_bitfield, i) is True
    ])
    assert bls_verify(
        pubkey=group_public_key,
        message_hash=hash_tree_root(proof.shard_parent_block),
        signature=proof.shard_aggregate_signature,
        domain=get_domain(state, slot_to_epoch(proof.shard_parent_block.slot), DOMAIN_SHARD_ATTESTER)
    )
```

The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_ref, ShardBlock)`, which would cut off ~220 bytes.

From 154eec0d027468d1a228f7f76e233eec9c4320ff Mon Sep 17 00:00:00 2001
From: Vitalik Buterin
Date: Wed, 13 Mar 2019 03:04:16 -0500
Subject: [PATCH 013/481] Added links to light client docs in the readme

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index c5c88daf9..e37539e3b 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,8 @@ Accompanying documents can be found in [specs](specs) and include
 * [BLS signature verification](specs/bls_signature.md)
 * [General test format](specs/test-format.md)
 * [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md)
+* [Merkle proof formats](specs/light_client/merkle_proofs.md)
+* [Light client syncing protocol](specs/light_client/sync_protocol.md)

 ## Design goals
 The following are the broad design goals for Ethereum 2.0:

From 4442dfffb97c04a0697dd84bb85ba692613a3fff Mon Sep 17 00:00:00 2001
From: vbuterin
Date: Wed, 13 Mar 2019 21:42:49 -0500
Subject: [PATCH 014/481] Fair proposer selection probability

Note that as a side effect, proposer selection becomes less predictable, but I don't feel like this is a large downside.
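For intuition about the acceptance rule in the diff below: with `rand_byte` drawn uniformly from `[0, 255]`, the check `get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte` passes with probability proportional to the candidate's effective balance, which is what makes the repeated sampling stake-weighted. A small illustrative simulation (not spec code; `MAX_DEPOSIT_AMOUNT` as in the constants table):

```python
import random

MAX_DEPOSIT_AMOUNT = 32 * 10**9  # Gwei, per the constants table

def acceptance_rate(balance: int, trials: int = 100000) -> float:
    # Fraction of trials in which a candidate with this effective balance passes the check.
    hits = sum(balance * 256 > MAX_DEPOSIT_AMOUNT * random.randrange(256) for _ in range(trials))
    return hits / trials

print(acceptance_rate(32 * 10**9))  # ~1.0
print(acceptance_rate(16 * 10**9))  # ~0.5
print(acceptance_rate(8 * 10**9))   # ~0.25
```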
--- specs/core/0_beacon-chain.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..5ca59c66b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1014,7 +1014,13 @@ def get_beacon_proposer_index(state: BeaconState, assert previous_epoch <= epoch <= next_epoch - first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] + first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] i = 0 + while i < len(first_committee): + rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] + candidate = first_committee[(epoch % i) % len(first_committee)] + if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: + return candidate + i += 1 return first_committee[epoch % len(first_committee)] ``` From 29caafc7567096325c14e7961550c4ba6f7c046b Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 13 Mar 2019 21:52:25 -0700 Subject: [PATCH 015/481] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index fdc9a11b3..e59f6a6b1 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -199,7 +199,7 @@ Send a list of block roots and slots to the peer. ) ``` -Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is undefined for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were undefined in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further undefined, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. +Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Client could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. 
From f3bddee7a5dcc8df1dfe0deeea9c875df0911415 Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 13 Mar 2019 21:55:48 -0700 Subject: [PATCH 016/481] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index e59f6a6b1..e087abe96 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -165,7 +165,7 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be ``` # BlockRootSlot ( - block_root: HashTreeRoot + block_root: bytes32 slot: uint64 ) From 5a9ef0fd982f7c23c55afcfd43e07a022a2878b9 Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 13 Mar 2019 21:55:59 -0700 Subject: [PATCH 017/481] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index e087abe96..e69f60801 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -201,7 +201,7 @@ Send a list of block roots and slots to the peer. Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. -The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Client could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. +The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. ### Beacon Block Bodies From 22e6212e6f08581aeca48dd6efee5e3c81c78f9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Wed, 13 Mar 2019 21:56:47 -0700 Subject: [PATCH 018/481] Update specs/networking/node-identification.md Co-Authored-By: mslipper --- specs/networking/node-identification.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/node-identification.md b/specs/networking/node-identification.md index 27c1ebf9d..0f1f9832b 100644 --- a/specs/networking/node-identification.md +++ b/specs/networking/node-identification.md @@ -23,7 +23,7 @@ It is RECOMMENDED that clients set their TCP port to the default of `9000`. ## Peer ID Generation -The `libp2p` networking stack identifies peers via a "peer ID." 
Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key. `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. +The `libp2p` networking stack identifies peers via a "peer ID." Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key struct (serialized in protobuf, refer to the [Peer ID spec](https://github.com/libp2p/specs/pull/100)). `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID. # See Also From 863f85c45ab2e3327c8c2e5f620af040b239fb40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= Date: Wed, 13 Mar 2019 21:57:29 -0700 Subject: [PATCH 019/481] Update specs/networking/rpc-interface.md Co-Authored-By: mslipper --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index e69f60801..d07e728c9 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -30,7 +30,7 @@ All referenced data structures can be found in the [0-beacon-chain](https://gith ## `libp2p` Protocol Names -A "Protocol Name" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. A client's supported protocol paths are negotiated by the `libp2p` stack at connection time; as such they are not part of individual message bodies. +A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. Peers exchange supported protocol IDs via the `Identify` protocol upon connection. When opening a new stream, peers pin a particular protocol ID to it, and the stream remains contextualised thereafter. Since messages are sent inside a stream, they do not need to bear the protocol ID. ## RPC-Over-`libp2p` From b65601afdae18b35c9aad8dfa25c1c677f757ec0 Mon Sep 17 00:00:00 2001 From: Vitalik Buterin Date: Thu, 14 Mar 2019 08:29:03 -0500 Subject: [PATCH 020/481] Updated Merkle proof file --- specs/light_client/merkle_proofs.md | 63 +++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index cf4dad2e3..f52941118 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -1,3 +1,9 @@ +### Constants + +| Name | Value | +| - | - | +| `LENGTH_FLAG` | `2**64 - 1` | + ### Generalized Merkle tree index In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows: @@ -36,17 +42,34 @@ y_data_root len(y) ....... ``` -We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo[5]`. We'll describe paths as lists: in these three cases they are `["x"]`, `["y", "len"]` and `["y", 5]` respectively. 
We can now define a function `get_generalized_indices(object: Any, path: List[str OR int], root=1: int) -> int` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. +We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. ```python -def get_generalized_indices(obj: Any, path: List[str or int], root=1) -> List[int]: +def path_to_encoded_form(obj: Any, path: List[str or int]) -> List[int]: + if len(path) == 0: + return [] + if isinstance(path[0], "__len__"): + assert len(path) == 1 + return [LENGTH_FLAG] + elif isinstance(path[0], str) and hasattr(obj, "fields"): + return [list(obj.fields.keys()).index(path[0])] + path_to_encoded_form(getattr(obj, path[0]), path[1:]) + elif isinstance(obj, (StaticList, DynamicList)): + return [path[0]] + path_to_encoded_form(obj[path[0]], path[1:]) + else: + raise Exception("Unknown type / path") +``` + +We can now define a function `get_generalized_indices(object: Any, path: List[int], root=1: int) -> int` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. 
+ +```python +def get_generalized_indices(obj: Any, path: List[int], root=1) -> List[int]: if len(path) == 0: return [root] elif isinstance(obj, StaticList): items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 new_root = root * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk return get_generalized_indices(obj[path[0]], path[1:], new_root) - elif isinstance(obj, DynamicList) and path[0] == "len": + elif isinstance(obj, DynamicList) and path[0] == LENGTH_FLAG: return [root * 2 + 1] elif isinstance(obj, DynamicList) and isinstance(path[0], int): assert path[0] < len(obj) @@ -54,9 +77,9 @@ def get_generalized_indices(obj: Any, path: List[str or int], root=1) -> List[in new_root = root * 2 * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk return [root *2 + 1] + get_generalized_indices(obj[path[0]], path[1:], new_root) elif hasattr(obj, "fields"): - index = list(fields.keys()).index(path[0]) - new_root = root * next_power_of_2(len(fields)) + index - return get_generalized_indices(getattr(obj, path[0]), path[1:], new_root) + field = list(fields.keys())[path[0]] + new_root = root * next_power_of_2(len(fields)) + path[0] + return get_generalized_indices(getattr(obj, field), path[1:], new_root) else: raise Exception("Unknown type / path") ``` @@ -109,6 +132,8 @@ def get_proof_indices(tree_indices: List[int]) -> List[int]: Generating a proof is simply a matter of taking the node of the SSZ hash tree with the union of the given generalized indices for each index given by `get_proof_indices`, and outputting the list of nodes in the same order. +Here is the verification function: + ```python def verify_multi_proof(root, indices, leaves, proof): tree = {} @@ -127,8 +152,32 @@ def verify_multi_proof(root, indices, leaves, proof): return (indices == []) or (1 in tree and tree[1] == root) ``` +### MerklePartial + +We define: + +#### `MerklePartialLeaf` + +```python +{ + "path": ["uint64"], + "value": "bytes32" +} +``` + +#### `MerklePartial` + + +```python +{ + "root": "bytes32", + "values": [MerklePartialLeaf], + "proof": ["bytes32"] +} +``` + #### Proofs for execution -We define `MerklePartial(f, arg1, arg2...)` as being a list of Merkle multiproofs of the sets of nodes in the hash trees of the SSZ objects that are needed to authenticate the values needed to compute some function `f(arg1, arg2...)`. An individual Merkle multiproof is given as a dynamic sized list of `bytes32` values, a `MerklePartial` is a fixed-size list of objects `{proof: ["bytes32"], value: "bytes32"}`, one for each `arg` to `f` (if some `arg` is a base type, then the multiproof is empty). +We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `MerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`. Ideally, any function which accepts an SSZ object should also be able to accept a `MerklePartial` object as a substitute. 
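To make the multiproof flow above concrete, here is a small round trip over a four-leaf tree. It is illustrative only: it assumes the `get_proof_indices` and `verify_multi_proof` functions above are in scope in the same module, and it supplies a 32-byte `hash` based on SHA-256 (an assumption consistent with the hash change elsewhere in this series); none of the tree-building code below is part of the specification.

```python
from hashlib import sha256

def hash(data: bytes) -> bytes:
    # Assumed 32-byte hash function for the sketch (SHA-256).
    return sha256(data).digest()

# Build a full 4-leaf binary Merkle tree keyed by generalized index (leaves sit at 4..7).
leaves = [bytes([i]) * 32 for i in range(4)]
tree = {4 + i: leaf for i, leaf in enumerate(leaves)}
for gindex in range(3, 0, -1):
    tree[gindex] = hash(tree[2 * gindex] + tree[2 * gindex + 1])

# Prove leaves 0 and 3 (generalized indices 4 and 7): send their values plus the
# hashes at whatever indices get_proof_indices reports as needed, in that order.
indices = [4, 7]
proof = [tree[i] for i in get_proof_indices(indices)]
assert verify_multi_proof(tree[1], indices, [tree[i] for i in indices], proof)
```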
From 23d15f51a799fa9fc3c2a7aa5493b05fcad81568 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 14 Mar 2019 18:57:17 +0000 Subject: [PATCH 021/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 5ca59c66b..d0be14e47 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1014,14 +1014,14 @@ def get_beacon_proposer_index(state: BeaconState, assert previous_epoch <= epoch <= next_epoch - first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] i = 0 - while i < len(first_committee): + first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] + i = 0 + while True: rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] candidate = first_committee[(epoch % i) % len(first_committee)] if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: return candidate i += 1 - return first_committee[epoch % len(first_committee)] ``` ### `verify_merkle_branch` From bbc51391153169e2d8071ab059b9b5001b5da072 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 14 Mar 2019 19:01:32 +0000 Subject: [PATCH 022/481] Update 0_beacon-chain.md Assuming `epoch % i` is a bug, and you meant `epoch + i`. @vbuterin --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d0be14e47..f2d06472b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1018,7 +1018,7 @@ def get_beacon_proposer_index(state: BeaconState, i = 0 while True: rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] - candidate = first_committee[(epoch % i) % len(first_committee)] + candidate = first_committee[(epoch + i) % len(first_committee)] if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: return candidate i += 1 From 24468de23bf1e0e4059ff5eeb5f9a5c621bc4f75 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 14 Mar 2019 20:28:44 -0500 Subject: [PATCH 023/481] Change get_shuffling to compute_committee See #729 and #774 The behavior now is that the first committee will consist of `get_permuted_index(0..n-1)`, the second committee `get_permuted_index(n....2n-1)`, etc. 
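As a toy illustration of the layout this commit message describes (editorial sketch only, with made-up numbers): splitting `n` shuffled positions into `k` committees gives committee `j` a contiguous range of permuted positions.

```python
def committee_positions(n: int, k: int, j: int) -> range:
    # Committee j is built from get_permuted_index(i, n, seed) for i in this range.
    return range(n * j // k, n * (j + 1) // k)

assert list(committee_positions(10, 2, 0)) == [0, 1, 2, 3, 4]
assert list(committee_positions(10, 2, 1)) == [5, 6, 7, 8, 9]
```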
--- specs/core/0_beacon-chain.md | 47 +++++++++++++++--------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..206aebf76 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -63,7 +63,7 @@ - [`get_permuted_index`](#get_permuted_index) - [`split`](#split) - [`get_epoch_committee_count`](#get_epoch_committee_count) - - [`get_shuffling`](#get_shuffling) + - [`compute_committee`](#compute_committee) - [`get_previous_epoch_committee_count`](#get_previous_epoch_committee_count) - [`get_current_epoch_committee_count`](#get_current_epoch_committee_count) - [`get_next_epoch_committee_count`](#get_next_epoch_committee_count) @@ -803,28 +803,26 @@ def get_epoch_committee_count(active_validator_count: int) -> int: ) * SLOTS_PER_EPOCH ``` -### `get_shuffling` +### `compute_committee` ```python -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: +def compute_committee(validator_indices: [int], + seed: Bytes32, + index: int, + total_committees: int) -> List[ValidatorIndex]: """ - Shuffle active validators and split into crosslink committees. - Return a list of committees (each a list of validator indices). + Return the index'th shuffled committee out of a total `total_committees` + using the given validator_indices and seed """ - # Shuffle active validator indices - active_validator_indices = get_active_validator_indices(validators, epoch) - length = len(active_validator_indices) - shuffled_indices = [active_validator_indices[get_permuted_index(i, length, seed)] for i in range(length)] - - # Split the shuffled active validator indices - return split(shuffled_indices, get_epoch_committee_count(length)) + start_offset = get_split_offset(len(validator_indices), total_committees, index) + end_offset = get_split_offset(len(validator_indices), total_committees, index + 1) + return [ + validator_indices[get_permuted_index(i, len(validator_indices), seed)] + for i in range(start_offset, end_offset) + ] ``` -**Invariant**: if `get_shuffling(seed, validators, epoch)` returns some value `x` for some `epoch <= get_current_epoch(state) + ACTIVATION_EXIT_DELAY`, it should return the same value `x` for the same `seed` and `epoch` and possible future modifications of `validators` forever in phase 0, and until the ~1 year deletion delay in phase 2 and in the future. - -**Note**: this definition and the next few definitions make heavy use of repetitive computing. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. +**Note**: this definition and the next few definitions are highly inefficient as algorithms as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. 
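One way to act on the note above is to compute the shuffled ordering once per seed and slice it per committee, so that repeated committee lookups do not re-run `get_permuted_index` for every position. This is an illustrative optimisation sketch rather than spec code; the cache layout is hypothetical and assumes `get_permuted_index` and `get_split_offset` are in scope.

```python
from typing import Dict, List, Tuple

_shuffling_cache: Dict[Tuple[bytes, int], List[int]] = {}

def get_shuffled_indices(validator_indices: List[int], seed: bytes) -> List[int]:
    key = (seed, len(validator_indices))
    if key not in _shuffling_cache:
        length = len(validator_indices)
        _shuffling_cache[key] = [
            validator_indices[get_permuted_index(i, length, seed)]
            for i in range(length)
        ]
    return _shuffling_cache[key]

def compute_committee_cached(validator_indices: List[int], seed: bytes, index: int, total_committees: int) -> List[int]:
    # Returns the same contiguous slice of the shuffled list as compute_committee above.
    shuffled = get_shuffled_indices(validator_indices, seed)
    start = get_split_offset(len(validator_indices), total_committees, index)
    end = get_split_offset(len(validator_indices), total_committees, index + 1)
    return shuffled[start:end]
```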
### `get_previous_epoch_committee_count` @@ -916,22 +914,17 @@ def get_crosslink_committees_at_slot(state: BeaconState, shuffling_epoch = state.current_shuffling_epoch shuffling_start_shard = state.current_shuffling_start_shard - shuffling = get_shuffling( - seed, - state.validator_registry, - shuffling_epoch, - ) - offset = slot % SLOTS_PER_EPOCH - committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH - slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT - + indices = get_active_validator_indices(state.validator_registry, shuffling_epoch) + committee_count = get_epoch_committee_count(len(indices)) + committees_per_slot = committee_count // SLOTS_PER_EPOCH return [ ( - shuffling[committees_per_slot * offset + i], + compute_committee(indices, seed, committees_per_slot * offset + i, committee_count), (slot_start_shard + i) % SHARD_COUNT, ) for i in range(committees_per_slot) ] + ``` ### `get_block_root` From 5d327b63646d8831412853e7c972f866b78e628e Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 09:43:38 +0000 Subject: [PATCH 024/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 206aebf76..2dfeb7d69 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -806,7 +806,7 @@ def get_epoch_committee_count(active_validator_count: int) -> int: ### `compute_committee` ```python -def compute_committee(validator_indices: [int], +def compute_committee(validator_indices: List[ValidatorIndex], seed: Bytes32, index: int, total_committees: int) -> List[ValidatorIndex]: From 68d1c74784b8d5a1daa05b8098fa8bfb2e17b009 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 09:45:20 +0000 Subject: [PATCH 025/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2dfeb7d69..be3544ab8 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -811,8 +811,8 @@ def compute_committee(validator_indices: List[ValidatorIndex], index: int, total_committees: int) -> List[ValidatorIndex]: """ - Return the index'th shuffled committee out of a total `total_committees` - using the given validator_indices and seed + Return the ``index``'th shuffled committee out of a total ``total_committees`` + using ``validator_indices`` and ``seed``. """ start_offset = get_split_offset(len(validator_indices), total_committees, index) end_offset = get_split_offset(len(validator_indices), total_committees, index + 1) From 4a5ef988138772f7c1851b4c72634af217142d2f Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 10:51:08 +0000 Subject: [PATCH 026/481] Move to SHA256 SHA256 is de facto blockchain standard. Standardisation of the hash function is a prerequisite for [full standardisation of BLS12-381 signatures](https://github.com/ethereum/eth2.0-specs/issues/605). Blockchain projects are likely to provide a cheap SHA256 opcode/precompile, and unlikely to provide a Keccak256 equivalent. (Even WASM-enabled blockchains are likely to provide a SHA256 opcode/precompile since WASM does *not* natively support optimised SHA256 CPU instructions.) With Ethereum 2.0 embracing SHA256 the wider industry is more likely to converge towards a unified cross-blockchain communication scheme via Merkle receipts.
There are no security blockers with SHA256 (see comments by Dan Boneh [here](https://github.com/ethereum/eth2.0-specs/issues/612#issuecomment-470452562)). --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..1d474f618 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -660,7 +660,7 @@ def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: ### `hash` -The hash function is denoted by `hash`. In Phase 0 the beacon chain is deployed with the same hash function as Ethereum 1.0, i.e. Keccak-256 (also incorrectly known as SHA3). +The `hash` function is SHA256. Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethereum 2.0 deployment phase. From dac43eb564a3da19bf878364295486d0b7c03fb2 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 11:18:06 +0000 Subject: [PATCH 027/481] Simplify deposits Fix #760 --- specs/core/0_beacon-chain.md | 49 +++++++++++------------------------- 1 file changed, 14 insertions(+), 35 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..2d168cfc2 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -29,7 +29,6 @@ - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - [`SlashableAttestation`](#slashableattestation) - - [`DepositInput`](#depositinput) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) - [`Validator`](#validator) @@ -377,7 +376,7 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `DepositInput` +#### `DepositData` ```python { @@ -385,21 +384,10 @@ The types are defined topologically to aid in facilitating an executable version 'pubkey': 'bytes48', # Withdrawal credentials 'withdrawal_credentials': 'bytes32', - # A BLS signature of this `DepositInput` - 'proof_of_possession': 'bytes96', -} -``` - -#### `DepositData` - -```python -{ # Amount in Gwei 'amount': 'uint64', - # Timestamp from deposit contract - 'timestamp': 'uint64', - # Deposit input - 'deposit_input': DepositInput, + # Container self-signature + 'proof_of_possession': 'bytes96', } ``` @@ -512,7 +500,7 @@ The types are defined topologically to aid in facilitating an executable version # Index in the deposit tree 'index': 'uint64', # Data - 'deposit_data': DepositData, + 'data': DepositData, } ``` @@ -1278,19 +1266,12 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: Process a deposit from Ethereum 1.0. Note that this function mutates ``state``. 
""" - deposit_input = deposit.deposit_data.deposit_input - - # Should equal 8 bytes for deposit_data.amount + - # 8 bytes for deposit_data.timestamp + - # 176 bytes for deposit_data.deposit_input - # It should match the deposit_data in the eth1.0 deposit contract - serialized_deposit_data = serialize(deposit.deposit_data) # Deposits must be processed in order assert deposit.index == state.deposit_index # Verify the Merkle branch merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialized_deposit_data), + leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialisation proof=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH, index=deposit.index, @@ -1305,16 +1286,14 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: state.deposit_index += 1 validator_pubkeys = [v.pubkey for v in state.validator_registry] - pubkey = deposit_input.pubkey - amount = deposit.deposit_data.amount - withdrawal_credentials = deposit_input.withdrawal_credentials + pubkey = deposit.data.pubkey if pubkey not in validator_pubkeys: # Verify the proof of possession proof_is_valid = bls_verify( - pubkey=deposit_input.pubkey, - message_hash=signed_root(deposit_input), - signature=deposit_input.proof_of_possession, + pubkey=pubkey, + message_hash=signed_root(deposit.data), + signature=deposit.data.proof_of_possession, domain=get_domain( state.fork, get_current_epoch(state), @@ -1327,7 +1306,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Add new validator validator = Validator( pubkey=pubkey, - withdrawal_credentials=withdrawal_credentials, + withdrawal_credentials=deposit.data.withdrawal_credentials, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, @@ -1337,10 +1316,10 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) - state.validator_balances.append(amount) + state.validator_balances.append(deposit.data.amount) else: # Increase balance by deposit amount - state.validator_balances[validator_pubkeys.index(pubkey)] += amount + state.validator_balances[validator_pubkeys.index(pubkey)] += deposit.data.amount ``` ### Routines for updating validator status @@ -1430,11 +1409,11 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus ### Deposit arguments -The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositInput`. +The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`. ### Withdrawal credentials -One of the `DepositInput` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now the only expected format is as follows: +One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. 
As of now the only expected format is as follows: * `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE` * `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey From 22be21223b90160f4f772146d821b34848ee3572 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 11:24:59 +0000 Subject: [PATCH 028/481] Update merkle_proofs.md --- specs/light_client/merkle_proofs.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index f52941118..311a4aa5c 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -1,3 +1,5 @@ +**NOTICE**: This document is a work-in-progress for researchers and implementers. + ### Constants | Name | Value | From b566722b52ad2cd9f92a0b48953e3b784a04853d Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 11:25:15 +0000 Subject: [PATCH 029/481] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 2a70dbb31..b84d55dcf 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -1,3 +1,5 @@ +**NOTICE**: This document is a work-in-progress for researchers and implementers. + # Beacon chain light client syncing One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. From 58603f276e3dc137599d6684b7e47650f03871b7 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 11:28:55 +0000 Subject: [PATCH 030/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2d168cfc2..9f8bec933 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1271,7 +1271,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Verify the Merkle branch merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialisation + leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization proof=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH, index=deposit.index, From 96ab535704fb18b3bbcf585159bf499a87d277bf Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 12:40:52 +0000 Subject: [PATCH 031/481] Simplify and cleanup process_attestation Improve readability and testability (by avoiding untriggerable `assert`). Fix #753. --- specs/core/0_beacon-chain.md | 86 +++++++++++++----------------------- 1 file changed, 30 insertions(+), 56 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..53695aeea 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2375,65 +2375,39 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: Process ``Attestation`` transaction. Note that this function mutates ``state``. 
""" - # Can't submit attestations that are too far in history (or in prehistory) - assert attestation.data.slot >= GENESIS_SLOT - assert state.slot <= attestation.data.slot + SLOTS_PER_EPOCH - # Can't submit attestations too quickly - assert attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot - # Verify that the justified epoch and root is correct - if slot_to_epoch(attestation.data.slot) >= get_current_epoch(state): - # Case 1: current epoch attestations - assert attestation.data.source_epoch == state.current_justified_epoch - assert attestation.data.source_root == state.current_justified_root - else: - # Case 2: previous epoch attestations - assert attestation.data.source_epoch == state.previous_justified_epoch - assert attestation.data.source_root == state.previous_justified_root - # Check that the crosslink data is valid - acceptable_crosslink_data = { - # Case 1: Latest crosslink matches the one in the state - attestation.data.previous_crosslink, - # Case 2: State has already been updated, state's latest crosslink matches the crosslink - # the attestation is trying to create - Crosslink( - crosslink_data_root=attestation.data.crosslink_data_root, - epoch=slot_to_epoch(attestation.data.slot) - ) - } - assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data - # Attestation must be nonempty! - assert attestation.aggregation_bitfield != b'\x00' * len(attestation.aggregation_bitfield) - # Custody must be empty (to be removed in phase 1) - assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) - # Get the committee for the specific shard that this attestation is for - crosslink_committee = [ - committee for committee, shard in get_crosslink_committees_at_slot(state, attestation.data.slot) - if shard == attestation.data.shard - ][0] - # Custody bitfield must be a subset of the attestation bitfield - for i in range(len(crosslink_committee)): - if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b0: - assert get_bitfield_bit(attestation.custody_bitfield, i) == 0b0 - # Verify aggregate signature - participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - custody_bit_1_participants = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) - custody_bit_0_participants = [i for i in participants if i not in custody_bit_1_participants] + assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot + assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY - assert bls_verify_multiple( - pubkeys=[ - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_participants]), - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_participants]), - ], - message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b1)), - ], + # Check source epoch and root match current or previous justified epoch and root + assert (slot_to_epoch(attestation.data.slot), attestation.data.source_epoch, attestation.data.source_root) in { + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), + (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), + } + + # Check crosslink data + assert attestation.data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] + assert 
state.latest_crosslinks[attestation.data.shard] in { + attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink + Crosslink( # Case 2: latest crosslink matches current crosslink + crosslink_data_root=attestation.data.crosslink_data_root, + epoch=slot_to_epoch(attestation.data.slot), + ), + } + + # Check custody bits [to be generalised in phase 1] + assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) + + # Check aggregate signature [to be generalised in phase 1] + participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + assert len(participants) != 0 + assert bls_verify( + pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), + message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), signature=attestation.aggregate_signature, domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION), ) - # Crosslink data root is zero (to be removed in phase 1) - assert attestation.data.crosslink_data_root == ZERO_HASH - # Apply the attestation + + # Cache pending attestation pending_attestation = PendingAttestation( data=attestation.data, aggregation_bitfield=attestation.aggregation_bitfield, @@ -2442,7 +2416,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ) if slot_to_epoch(attestation.data.slot) == get_current_epoch(state): state.current_epoch_attestations.append(pending_attestation) - elif slot_to_epoch(attestation.data.slot) == get_previous_epoch(state): + else: state.previous_epoch_attestations.append(pending_attestation) ``` From d8d653dd949e92e4baf368040afa0b8216922a55 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 12:51:46 +0000 Subject: [PATCH 032/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 53695aeea..766bdf53c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2378,8 +2378,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY - # Check source epoch and root match current or previous justified epoch and root - assert (slot_to_epoch(attestation.data.slot), attestation.data.source_epoch, attestation.data.source_root) in { + # Check target epoch, source epoch, and source root + target_epoch = slot_to_epoch(attestation.data.slot) + assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), } @@ -2390,7 +2391,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink Crosslink( # Case 2: latest crosslink matches current crosslink crosslink_data_root=attestation.data.crosslink_data_root, - epoch=slot_to_epoch(attestation.data.slot), + epoch=target_epoch, ), } @@ -2404,7 +2405,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), 
message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), signature=attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(attestation.data.slot), DOMAIN_ATTESTATION), + domain=get_domain(state.fork, target_epoch, DOMAIN_ATTESTATION), ) # Cache pending attestation @@ -2414,7 +2415,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: custody_bitfield=attestation.custody_bitfield, inclusion_slot=state.slot ) - if slot_to_epoch(attestation.data.slot) == get_current_epoch(state): + if target_epoch == get_current_epoch(state): state.current_epoch_attestations.append(pending_attestation) else: state.previous_epoch_attestations.append(pending_attestation) From 4a8d748c55aecbdf91170677321abb46dca4fc4b Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:05:46 +0000 Subject: [PATCH 033/481] Milder ejections See item 22 in https://github.com/ethereum/eth2.0-specs/issues/675. Also partially addresses https://github.com/ethereum/eth2.0-specs/issues/527. --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..0c06972ff 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2050,7 +2050,7 @@ def process_ejections(state: BeaconState) -> None: """ for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): if state.validator_balances[index] < EJECTION_BALANCE: - exit_validator(state, index) + initiate_validator_exit(state, index) ``` #### Validator registry and shuffling seed data From e912ed7fca1d62c16979074b28c79fc1072df019 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:12:03 +0000 Subject: [PATCH 034/481] Include recently slashed churn in churn queue Addresses #527 in combination with #784. --- specs/core/0_beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..36d45b31b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2102,7 +2102,9 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - balance_churn = 0 + total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] + total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + balance_churn = total_at_end - total_at_start for index, validator in enumerate(state.validator_registry): if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: # Check the balance churn would be within the allowance From 4b461838d27647b80a487b9543ffbf64e610adac Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:32:24 +0000 Subject: [PATCH 035/481] `GENESIS_EPOCH - 1` for `previous_shuffling_epoch` and `previous_justified_epoch` See item 26 in #675. 
--- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..54741c1e0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1531,7 +1531,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)], previous_shuffling_start_shard=GENESIS_START_SHARD, current_shuffling_start_shard=GENESIS_START_SHARD, - previous_shuffling_epoch=GENESIS_EPOCH, + previous_shuffling_epoch=GENESIS_EPOCH - 1, current_shuffling_epoch=GENESIS_EPOCH, previous_shuffling_seed=ZERO_HASH, current_shuffling_seed=ZERO_HASH, @@ -1539,7 +1539,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Finality previous_epoch_attestations=[], current_epoch_attestations=[], - previous_justified_epoch=GENESIS_EPOCH, + previous_justified_epoch=GENESIS_EPOCH - 1, current_justified_epoch=GENESIS_EPOCH, previous_justified_root=ZERO_HASH, current_justified_root=ZERO_HASH, From 1236e8e1fa8c9ba235f316f6739aa55672ffcd45 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:53:24 +0000 Subject: [PATCH 036/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 36d45b31b..ff8b09071 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -595,6 +595,7 @@ The types are defined topologically to aid in facilitating an executable version 'validator_registry': [Validator], 'validator_balances': ['uint64'], 'validator_registry_update_epoch': 'uint64', + 'validator_registry_update_slashed_balances': 'uint64', # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], @@ -2116,6 +2117,7 @@ def update_validator_registry(state: BeaconState) -> None: exit_validator(state, index) state.validator_registry_update_epoch = current_epoch + state.validator_registry_update_slashed_balances = total_at_end ``` Run the following function: @@ -2164,7 +2166,7 @@ def process_slashings(state: BeaconState) -> None: total_balance = get_total_balance(state, active_validator_indices) # Compute `total_penalties` - total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] + total_at_start = state.validator_registry_update_slashed_balances total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] total_penalties = total_at_end - total_at_start From 709e0df39f4161e63a1a7877a133b1e121fcb174 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 13:54:33 +0000 Subject: [PATCH 037/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ff8b09071..36c6023e1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2103,7 +2103,7 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] + total_at_start = state.validator_registry_update_slashed_balances total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] balance_churn = total_at_end - 
total_at_start for index, validator in enumerate(state.validator_registry): @@ -2166,7 +2166,7 @@ def process_slashings(state: BeaconState) -> None: total_balance = get_total_balance(state, active_validator_indices) # Compute `total_penalties` - total_at_start = state.validator_registry_update_slashed_balances + total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] total_penalties = total_at_end - total_at_start From 3944fd4b1554ea928b625c6e7ae47fc6d6076737 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 15 Mar 2019 18:18:37 +0000 Subject: [PATCH 038/481] Clarify empty sums in BLS spec (#782) Fix #775. --- specs/bls_signature.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/bls_signature.md b/specs/bls_signature.md index b0490b7ae..14a4f1cb7 100644 --- a/specs/bls_signature.md +++ b/specs/bls_signature.md @@ -110,11 +110,11 @@ def modular_squareroot(value: Fq2) -> Fq2: ### `bls_aggregate_pubkeys` -Let `bls_aggregate_pubkeys(pubkeys: List[Bytes48]) -> Bytes48` return `pubkeys[0] + .... + pubkeys[len(pubkeys)-1]`, where `+` is the elliptic curve addition operation over the G1 curve. +Let `bls_aggregate_pubkeys(pubkeys: List[Bytes48]) -> Bytes48` return `pubkeys[0] + .... + pubkeys[len(pubkeys)-1]`, where `+` is the elliptic curve addition operation over the G1 curve. (When `len(pubkeys) == 0` the empty sum is the G1 point at infinity.) ### `bls_aggregate_signatures` -Let `bls_aggregate_signatures(signatures: List[Bytes96]) -> Bytes96` return `signatures[0] + .... + signatures[len(signatures)-1]`, where `+` is the elliptic curve addition operation over the G2 curve. +Let `bls_aggregate_signatures(signatures: List[Bytes96]) -> Bytes96` return `signatures[0] + .... + signatures[len(signatures)-1]`, where `+` is the elliptic curve addition operation over the G2 curve. (When `len(signatures) == 0` the empty sum is the G2 point at infinity.) 
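The empty-sum convention above can be spelled out in a short sketch. It is illustrative only, works over already-decompressed curve points rather than the compressed `Bytes48`/`Bytes96` encodings, and assumes py_ecc's `optimized_bls12_381` module with its `add` operation and `Z1`/`Z2` points at infinity.

```python
from py_ecc.optimized_bls12_381 import Z1, Z2, add

def aggregate_g1_points(points):
    result = Z1  # empty input -> the G1 point at infinity
    for point in points:
        result = add(result, point)
    return result

def aggregate_g2_points(points):
    result = Z2  # empty input -> the G2 point at infinity
    for point in points:
        result = add(result, point)
    return result

assert aggregate_g1_points([]) == Z1
assert aggregate_g2_points([]) == Z2
```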
## Signature verification From 6b118d2398d5506fdd5d1659e85b93b1cf9e2bc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6ren=20Steiger?= Date: Sat, 16 Mar 2019 05:16:47 +0100 Subject: [PATCH 039/481] Add trailing comma (#789) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index daa1bc108..f2e639a96 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -628,7 +628,7 @@ The types are defined topologically to aid in facilitating an executable version # Ethereum 1.0 chain data 'latest_eth1_data': Eth1Data, 'eth1_data_votes': [Eth1DataVote], - 'deposit_index': 'uint64' + 'deposit_index': 'uint64', } ``` From e5ff0d59ad22a9bf42acaa0bcf1e7ba646d4b41d Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 16 Mar 2019 11:23:41 +0000 Subject: [PATCH 040/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 36c6023e1..26f579233 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -595,7 +595,6 @@ The types are defined topologically to aid in facilitating an executable version 'validator_registry': [Validator], 'validator_balances': ['uint64'], 'validator_registry_update_epoch': 'uint64', - 'validator_registry_update_slashed_balances': 'uint64', # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], @@ -2103,21 +2102,23 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - total_at_start = state.validator_registry_update_slashed_balances - total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - balance_churn = total_at_end - total_at_start - for index, validator in enumerate(state.validator_registry): - if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break + if state.current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: + balance_churn = ( + state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - + state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + ) - # Exit validator - exit_validator(state, index) + for index, validator in enumerate(state.validator_registry): + if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: + # Check the balance churn would be within the allowance + balance_churn += get_effective_balance(state, index) + if balance_churn > max_balance_churn: + break + + # Exit validator + exit_validator(state, index) state.validator_registry_update_epoch = current_epoch - state.validator_registry_update_slashed_balances = total_at_end ``` Run the following function: From 1a0938169bce4388c4443809a221862a259d9b69 Mon Sep 17 00:00:00 2001 From: NIC Lin Date: Sat, 16 Mar 2019 20:45:39 +0800 Subject: [PATCH 041/481] Fix `get_split_offset` (#790) --- specs/core/1_shard-data-chains.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 1713c6cbf..b2f567ed8 100644 --- a/specs/core/1_shard-data-chains.md +++ 
b/specs/core/1_shard-data-chains.md @@ -118,9 +118,9 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md def get_split_offset(list_size: int, chunks: int, index: int) -> int: """ Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k+1, i)] + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] """ - return (len(list_size) * index) // chunks + return (list_size * index) // chunks ```` #### `get_shuffled_committee` From 919b99e0aea2a8338cafcdd984a5531cfbfe08fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6ren=20Steiger?= Date: Sat, 16 Mar 2019 13:46:45 +0100 Subject: [PATCH 042/481] Add missing word (#788) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f2e639a96..1d53f1c3f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -186,7 +186,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | -* For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) +* For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) ### Deposit contract From 65162e037110be66dd793a37ecbc0a285e36e8b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B6ren=20Steiger?= Date: Sat, 16 Mar 2019 21:56:10 +0100 Subject: [PATCH 043/481] Update 0_beacon-chain.md (#791) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1d53f1c3f..5ab3da052 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1042,7 +1042,7 @@ def get_attestation_participants(state: BeaconState, attestation_data: AttestationData, bitfield: bytes) -> List[ValidatorIndex]: """ - Return the participant indices at for the ``attestation_data`` and ``bitfield``. + Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. 
""" # Find the committee in the list with the desired shard crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) From 1967a8939d54601e56946705453f304118042bb5 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 17 Mar 2019 06:25:56 -0500 Subject: [PATCH 044/481] Fixed some variable names --- specs/light_client/sync_protocol.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index b84d55dcf..2a420abcb 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -69,14 +69,14 @@ A light client will keep track of: * A random `shard_id` in `[0...SHARD_COUNT-1]` (selected once and retained forever) * A block header that they consider to be finalized (`finalized_header`) and do not expect to revert. -* `later_period_data = get_maximal_later_committee(finalized_header, shard_id)` -* `earlier_period_data = get_maximal_earlier_committee(finalized_header, shard_id)` +* `later_period_data = get_later_period_data(finalized_header, shard_id)` +* `earlier_period_data = get_earlier_period_data(finalized_header, shard_id)` We use the struct `validator_memory` to keep track of these variables. ### Updating the shuffled committee -If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_maximal_later_committee, validator_memory.finalized_header, shard_id)`. It can then compute: +If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_later_period_data, validator_memory.finalized_header, shard_id)`. It can then compute: ```python earlier_period_data = later_period_data @@ -95,13 +95,13 @@ def compute_committee(header: BeaconBlockHeader, earlier_validator_count = validator_memory.earlier_period_data.validator_count later_validator_count = validator_memory.later_period_data.validator_count - earlier_committee = validator_memory.earlier_period_data.committee - later_committee = validator_memory.later_period_data.committee + maximal_earlier_committee = validator_memory.earlier_period_data.committee + maximal_later_committee = validator_memory.later_period_data.committee earlier_start_epoch = get_earlier_start_epoch(header.slot) later_start_epoch = get_later_start_epoch(header.slot) epoch = slot_to_epoch(header.slot) - actual_committee_count = max( + committee_count = max( earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), ) + 1 From 506fdf40424e05876dfbd32bc2cece4895330185 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 17 Mar 2019 06:33:19 -0500 Subject: [PATCH 045/481] Added FixedSizeList wrappers (#777) * Added FixedSizeList wrappers Requires corresponding changes to the spec pythonizer. 
* FixedSizeList -> Vector --- specs/core/0_beacon-chain.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 5ab3da052..454cf105b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1528,7 +1528,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees - latest_randao_mixes=[ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)], + latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), previous_shuffling_start_shard=GENESIS_START_SHARD, current_shuffling_start_shard=GENESIS_START_SHARD, previous_shuffling_epoch=GENESIS_EPOCH, @@ -1548,11 +1548,11 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], finalized_root=ZERO_HASH, # Recent state - latest_crosslinks=[Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)], - latest_block_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], - latest_state_roots=[ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)], - latest_active_index_roots=[ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)], - latest_slashed_balances=[0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)], + latest_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), + latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), + latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), + latest_slashed_balances=Vector([0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)]), latest_block_header=get_temporary_block_header(get_empty_block()), historical_roots=[], From 390ece7fbeb09e285f3bd79d89fcf6d9a5f75dc4 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 17 Mar 2019 11:33:29 +0000 Subject: [PATCH 046/481] Rename "vector" to "tuple" in SSZ spec (#794) To be done in combination with #777. Also: * Define "fixed-size" and "variable-size" more rigorously * Use `"` vs `'` consistently * Add missing `"` --- specs/simple-serialize.md | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 862d13edf..378a1a7cb 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -12,7 +12,7 @@ This is a **work in progress** describing typing, serialization and Merkleizatio - [Serialization](#serialization) - [`"uintN"`](#uintn) - [`"bool"`](#bool) - - [Tuples, containers, lists](#tuples-containers-lists) + - [Vectors, containers, lists](#vectors-containers-lists) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Self-signed containers](#self-signed-containers) @@ -34,12 +34,14 @@ This is a **work in progress** describing typing, serialization and Merkleizatio ### Composite types * **container**: ordered heterogenous collection of values - * key-pair curly bracket notation `{}`, e.g. `{'foo': "uint64", 'bar': "bool"}` -* **tuple**: ordered fixed-length homogeneous collection of values + * key-pair curly bracket notation `{}`, e.g. `{"foo": "uint64", "bar": "bool"}` +* **vector**: ordered fixed-length homogeneous collection of values * angle bracket notation `[type, N]`, e.g. 
`["uint64", N]` * **list**: ordered variable-length homogenous collection of values * angle bracket notation `[type]`, e.g. `["uint64"]` +We recursively define "variable-size" types to be lists and all types that contains a variable-size type. All other types are said to be "fixed-size". + ### Aliases For convenience we alias: @@ -54,34 +56,34 @@ We recursively define the `serialize` function which consumes an object `value` *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type. -### `uintN` +### `"uintN"` ```python assert N in [8, 16, 32, 64, 128, 256] -return value.to_bytes(N // 8, 'little') +return value.to_bytes(N // 8, "little") ``` -### `bool` +### `"bool"` ```python assert value in (True, False) -return b'\x01' if value is True else b'\x00' +return b"\x01" if value is True else b"\x00" ``` -### Tuples, containers, lists +### Vectors, containers, lists -If `value` is fixed-length (i.e. does not embed a list): +If `value` is fixed-size: ```python -return ''.join([serialize(element) for element in value]) +return "".join([serialize(element) for element in value]) ``` -If `value` is variable-length (i.e. embeds a list): +If `value` is variable-size: ```python -serialized_bytes = ''.join([serialize(element) for element in value]) +serialized_bytes = "".join([serialize(element) for element in value]) assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) -serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') +serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, "little") return serialized_length + serialized_bytes ``` @@ -99,9 +101,9 @@ We first define helper functions: We now define Merkleization `hash_tree_root(value)` of an object `value` recursively: -* `merkleize(pack(value))` if `value` is a basic object or a tuple of basic objects +* `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects * `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects -* `merkleize([hash_tree_root(element) for element in value])` if `value` is a tuple of composite objects or a container +* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container * `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects ## Self-signed containers From 6b82f5e9995acc7ed9f2e24bb8edf213767c60e7 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 17 Mar 2019 19:33:42 +0800 Subject: [PATCH 047/481] Set `GENESIS_FORK_VERSION` to a `bytes4` constant (#792) * Set `GENESIS_FORK_VERSION` to a `bytes4` constant * Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 454cf105b..a631bf2fc 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -208,7 +208,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| Name | Value | | - | - | -| `GENESIS_FORK_VERSION` | `0` | +| `GENESIS_FORK_VERSION` | `int_to_bytes4(0)` | | `GENESIS_SLOT` | `2**32` | | `GENESIS_EPOCH` | `slot_to_epoch(GENESIS_SLOT)` | | `GENESIS_START_SHARD` | `0` | @@ -1517,8 +1517,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], slot=GENESIS_SLOT, genesis_time=genesis_time, fork=Fork( - previous_version=int_to_bytes4(GENESIS_FORK_VERSION), - current_version=int_to_bytes4(GENESIS_FORK_VERSION), + previous_version=GENESIS_FORK_VERSION, + current_version=GENESIS_FORK_VERSION, epoch=GENESIS_EPOCH, ), From 91a0c1ba5f6c4439345b4476c8a1637140b48f28 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 17 Mar 2019 06:44:19 -0500 Subject: [PATCH 048/481] Persistent committee size per slot reduced to max 128 (#734) * Persistent committee size per slot target 128 max 256 Cuts down the cost of verifying a shard chain and aggregating signatures for a shard chain, and also makes the shard chain signatures more usable by light clients for verification as they would only need to keep track of a max 256-sized committee. --- specs/core/1_shard-data-chains.md | 85 +++++++++++++++++++++---------- 1 file changed, 58 insertions(+), 27 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index b2f567ed8..c76f9ba08 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -17,41 +17,51 @@ At the current stage, Phase 1, while fundamentally feature-complete, is still su - [Time parameters](#time-parameters) - [Max operations per block](#max-operations-per-block) - [Signature domains](#signature-domains) - - [Shard chains and crosslink data](#shard-chains-and-crosslink-data) - - [Helper functions](#helper-functions) +- [Shard chains and crosslink data](#shard-chains-and-crosslink-data) + - [Helper functions](#helper-functions) - [`get_split_offset`](#get_split_offset) - [`get_shuffled_committee`](#get_shuffled_committee) - [`get_persistent_committee`](#get_persistent_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) - - [Data Structures](#data-structures) + - [Data Structures](#data-structures) - [Shard chain blocks](#shard-chain-blocks) - - [Shard block processing](#shard-block-processing) + - [Shard block processing](#shard-block-processing) - [Verifying shard block data](#verifying-shard-block-data) - [Verifying a crosslink](#verifying-a-crosslink) - [Shard block fork choice rule](#shard-block-fork-choice-rule) - - [Updates to the beacon chain](#updates-to-the-beacon-chain) - - [Data structures](#data-structures) +- [Updates to the beacon chain](#updates-to-the-beacon-chain) + - [Data structures](#data-structures) - [`Validator`](#validator) - [`BeaconBlockBody`](#beaconblockbody) + - [`BeaconState`](#beaconstate) - [`BranchChallenge`](#branchchallenge) - [`BranchResponse`](#branchresponse) - [`BranchChallengeRecord`](#branchchallengerecord) + - [`InteractiveCustodyChallengeRecord`](#interactivecustodychallengerecord) + - [`InteractiveCustodyChallengeInitiation`](#interactivecustodychallengeinitiation) + - [`InteractiveCustodyChallengeResponse`](#interactivecustodychallengeresponse) + - [`InteractiveCustodyChallengeContinuation`](#interactivecustodychallengecontinuation) - [`SubkeyReveal`](#subkeyreveal) - [Helpers](#helpers) - - [`get_attestation_data_merkle_depth`](#get_attestation_data_merkle_depth) + - [`get_branch_challenge_record_by_id`](#get_branch_challenge_record_by_id) + - 
[`get_custody_challenge_record_by_id`](#get_custody_challenge_record_by_id) + - [`get_attestation_merkle_depth`](#get_attestation_merkle_depth) - [`epoch_to_custody_period`](#epoch_to_custody_period) - [`slot_to_custody_period`](#slot_to_custody_period) - [`get_current_custody_period`](#get_current_custody_period) - [`verify_custody_subkey_reveal`](#verify_custody_subkey_reveal) - - [`prepare_validator_for_withdrawal`](#prepare_validator_for_withdrawal) + - [`verify_signed_challenge_message`](#verify_signed_challenge_message) - [`penalize_validator`](#penalize_validator) - - [Per-slot processing](#per-slot-processing) + - [Per-slot processing](#per-slot-processing) - [Operations](#operations) - [Branch challenges](#branch-challenges) - [Branch responses](#branch-responses) - [Subkey reveals](#subkey-reveals) - - [Per-epoch processing](#per-epoch-processing) - - [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition) + - [Interactive custody challenge initiations](#interactive-custody-challenge-initiations) + - [Interactive custody challenge responses](#interactive-custody-challenge-responses) + - [Interactive custody challenge continuations](#interactive-custody-challenge-continuations) + - [Per-epoch processing](#per-epoch-processing) + - [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition) @@ -128,16 +138,27 @@ def get_split_offset(list_size: int, chunks: int, index: int) -> int: ```python def get_shuffled_committee(state: BeaconState, shard: Shard, - committee_start_epoch: Epoch) -> List[ValidatorIndex]: + committee_start_epoch: Epoch, + index: int, + committee_count: int) -> List[ValidatorIndex]: """ Return shuffled committee. """ - validator_indices = get_active_validator_indices(state.validators, committee_start_epoch) + active_validator_indices = get_active_validator_indices(state.validator_registry, committee_start_epoch) + length = len(active_validator_indices) seed = generate_seed(state, committee_start_epoch) - start_offset = get_split_offset(len(validator_indices), SHARD_COUNT, shard) - end_offset = get_split_offset(len(validator_indices), SHARD_COUNT, shard + 1) + start_offset = get_split_offset( + length, + SHARD_COUNT * committee_count, + shard * committee_count + index, + ) + end_offset = get_split_offset( + length, + SHARD_COUNT * committee_count, + shard * committee_count + index + 1, + ) return [ - validator_indices[get_permuted_index(i, len(validator_indices), seed)] + active_validator_indices[get_permuted_index(i, length, seed)] for i in range(start_offset, end_offset) ] ``` @@ -147,15 +168,24 @@ def get_shuffled_committee(state: BeaconState, ```python def get_persistent_committee(state: BeaconState, shard: Shard, - epoch: Epoch) -> List[ValidatorIndex]: + slot: Slot) -> List[ValidatorIndex]: """ - Return the persistent committee for the given ``shard`` at the given ``epoch``. + Return the persistent committee for the given ``shard`` at the given ``slot``. 
""" - earlier_committee_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 - earlier_committee = get_shuffled_committee(state, shard, earlier_committee_start_epoch) + + earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 + later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD - later_committee_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD - later_committee = get_shuffled_committee(state, shard, later_committee_start_epoch) + committee_count = max( + len(get_active_validator_indices(state.validator_registry, earlier_start_epoch)) // + (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + len(get_active_validator_indices(state.validator_registry, later_start_epoch)) // + (SHARD_COUNT * TARGET_COMMITTEE_SIZE), + ) + 1 + + index = slot % committee_count + earlier_committee = get_shuffled_committee(state, shard, earlier_start_epoch, index, committee_count) + later_committee = get_shuffled_committee(state, shard, later_start_epoch, index, committee_count) def get_switchover_epoch(index): return ( @@ -170,6 +200,7 @@ def get_persistent_committee(state: BeaconState, [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] ))) ``` + #### `get_shard_proposer_index` ```python @@ -181,14 +212,14 @@ def get_shard_proposer_index(state: BeaconState, int_to_bytes8(shard) + int_to_bytes8(slot) ) - persistent_committee = get_persistent_committee(state, shard, slot_to_epoch(slot)) + persistent_committee = get_persistent_committee(state, shard, slot) # Default proposer index = bytes_to_int(seed[0:8]) % len(persistent_committee) # If default proposer exits, try the other proposers in order; if all are exited # return None (ie. no block can be proposed) validators_to_try = persistent_committee[index:] + persistent_committee[:index] for index in validators_to_try: - if is_active_validator(state.validators[index], get_current_epoch(state)): + if is_active_validator(state.validator_registry[index], get_current_epoch(state)): return index return None ``` @@ -233,14 +264,14 @@ To validate a block header on shard `shard_block.shard_id`, compute as follows: * Verify that `shard_block.beacon_chain_ref` is the hash of a block in the (canonical) beacon chain with slot less than or equal to `slot`. * Verify that `shard_block.beacon_chain_ref` is equal to or a descendant of the `shard_block.beacon_chain_ref` specified in the `ShardBlock` pointed to by `shard_block.parent_root`. * Let `state` be the state of the beacon chain block referred to by `shard_block.beacon_chain_ref`. -* Let `persistent_committee = get_persistent_committee(state, shard_block.shard_id, slot_to_epoch(shard_block.slot))`. +* Let `persistent_committee = get_persistent_committee(state, shard_block.shard_id, shard_block.slot)`. 
* Assert `verify_bitfield(shard_block.participation_bitfield, len(persistent_committee))` -* For every `i in range(len(persistent_committee))` where `is_active_validator(state.validators[persistent_committee[i]], get_current_epoch(state))` returns `False`, verify that `get_bitfield_bit(shard_block.participation_bitfield, i) == 0` +* For every `i in range(len(persistent_committee))` where `is_active_validator(state.validator_registry[persistent_committee[i]], get_current_epoch(state))` returns `False`, verify that `get_bitfield_bit(shard_block.participation_bitfield, i) == 0` * Let `proposer_index = get_shard_proposer_index(state, shard_block.shard_id, shard_block.slot)`. * Verify that `proposer_index` is not `None`. * Let `msg` be the `shard_block` but with `shard_block.signature` set to `[0, 0]`. * Verify that `bls_verify(pubkey=validators[proposer_index].pubkey, message_hash=hash(msg), signature=shard_block.signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_PROPOSER))` passes. -* Let `group_public_key = bls_aggregate_pubkeys([state.validators[index].pubkey for i, index in enumerate(persistent_committee) if get_bitfield_bit(shard_block.participation_bitfield, i) is True])`. +* Let `group_public_key = bls_aggregate_pubkeys([state.validator_registry[index].pubkey for i, index in enumerate(persistent_committee) if get_bitfield_bit(shard_block.participation_bitfield, i) is True])`. * Verify that `bls_verify(pubkey=group_public_key, message_hash=shard_block.parent_root, sig=shard_block.aggregate_signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER))` passes. ### Verifying shard block data From d25c18b320ac9acac0d825f2d1977e313613c1d5 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 17 Mar 2019 11:48:47 +0000 Subject: [PATCH 049/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1bbab78dc..03a9df2a1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2247,7 +2247,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: assert block.previous_block_root == hash_tree_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) - # Verify proposer + # Verify proposer is not slashed proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] assert not proposer.slashed # Verify proposer signature From fba333c79185f8eaa84cad816f82dc124c581988 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Sun, 17 Mar 2019 21:19:12 -0700 Subject: [PATCH 050/481] Updates from review --- specs/networking/rpc-interface.md | 35 ++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index d07e728c9..f505a4663 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -24,7 +24,7 @@ Message body schemas are notated like this: ) ``` -SSZ serialization is field-order dependent. Therefore, fields MUST be encoded and decoded according to the order described in this document. The encoded values of each field are concatenated to form the final encoded message body. Embedded structs are serialized as Containers unless otherwise noted. +Embedded types are serialized as SSZ Containers unless otherwise noted. 
All referenced data structures can be found in the [0-beacon-chain](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#data-structures) specification. @@ -34,7 +34,7 @@ A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp ## RPC-Over-`libp2p` -To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: +To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/beacon/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: ``` ( @@ -49,6 +49,7 @@ and their corresponding responses are wrapped in a "response" structure: ``` ( id: uint64 + is_error: boolean result: Response ) ``` @@ -58,7 +59,8 @@ If an error occurs, a variant of the response structure is returned: ``` ( id: uint64 - error: ( + is_error: boolean + result: ( code: uint16 data: bytes ) @@ -69,11 +71,13 @@ The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](http 1. The `id` member is REQUIRED. 2. The `id` member in the response MUST be the same as the value of the `id` in the request. -3. The `method_id` member is REQUIRED. -4. The `result` member is required on success, and MUST NOT exist if there was an error. -5. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error. +3. The `id` member MUST be unique within the context of a single connection. Monotonically increasing `id`s are RECOMMENDED. +4. The `method_id` member is REQUIRED. +5. The `result` member is required on success, and MUST NOT exist if there was an error. +6. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error. +7. `is_error` MUST be `true` on errors, or `false` otherwise. -Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. +Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. Note that this implies that responses MAY arrive in a different order than requests. The "method ID" fields in the below messages refer to the `method` field in the request structure above. @@ -136,7 +140,7 @@ Root B ^ +---+ ``` -Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD send beacon block roots to its counterparty via `beacon_block_roots` (i.e., RPC method `10`). +Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e., RPC method `10`). ### Goodbye @@ -154,13 +158,20 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be - `1`: Client shut down. - `2`: Irrelevant network. -- `3`: Irrelevant shard. +- `3`: Too many peers. +- `4`: Fault/error. -### Provide Beacon Block Roots +### Request Beacon Block Roots **Method ID:** `10` -**Body:** +**Request Body** + +``` +() +``` + +**Response Body:** ``` # BlockRootSlot @@ -174,7 +185,7 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be ) ``` -Send a list of block roots and slots to the peer. +Send a list of block roots and slots to the requesting peer. 
### Beacon Block Headers From 003961362887a2e05dd7af55840da57aedfa41dd Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 18 Mar 2019 19:08:41 +0000 Subject: [PATCH 051/481] Simplify exit_validator Minor cleanup --- specs/core/0_beacon-chain.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..434f2b680 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1377,17 +1377,14 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: ```python def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: """ - Exit the validator of the given ``index``. + Exit the validator with the given ``index``. Note that this function mutates ``state``. """ validator = state.validator_registry[index] - delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - # The following updates only occur if not previous exited - if validator.exit_epoch <= delayed_activation_exit_epoch: - return - else: - validator.exit_epoch = delayed_activation_exit_epoch + # Update validator exit epoch if not previously exited + if validator.exit_epoch == FAR_FUTURE_EPOCH: + validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) ``` #### `slash_validator` From 2dce326310cc99adccf083c4a06b7cc09b68d244 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Mon, 18 Mar 2019 16:02:31 -0700 Subject: [PATCH 052/481] Bring back envelope --- specs/networking/messaging.md | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md index e88116f46..de92fe6d4 100644 --- a/specs/networking/messaging.md +++ b/specs/networking/messaging.md @@ -15,15 +15,17 @@ This specification seeks to define a messaging protocol that is flexible enough ## Message Structure -An ETH 2.0 message consists of a single byte representing the message version followed by the encoded, potentially compressed body. We separate the message's version from the version included in the `libp2p` protocol path in order to allow encoding and compression schemes to be updated independently of the `libp2p` protocols themselves. - -It is unlikely that more than 255 message versions will need to be supported, so a single byte should suffice. +An ETH 2.0 message consists of an envelope that defines the message's compression, encoding, and length followed by the body itself. Visually, a message looks like this: ``` +--------------------------+ -| version byte | +| compression nibble | ++--------------------------+ +| encoding nibble | ++--------------------------+ +| body length (uint64) | +--------------------------+ | | | body | @@ -31,11 +33,12 @@ Visually, a message looks like this: +--------------------------+ ``` -Clients MUST ignore messages with mal-formed bodies. The `version` byte MUST be one of the below values: +Clients MUST ignore messages with mal-formed bodies. 
The compression/encoding nibbles MUST be one of the following values: -## Version Byte Values +## Compression Nibble Values -### `0x01` +- `0x0`: no compression -- **Encoding Scheme:** SSZ -- **Compression Scheme:** Snappy +## Encoding Nibble Values + +- `0x1`: SSZ From dc4b652f72339063bfbaae378e850d173168c9f6 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:03:42 +0000 Subject: [PATCH 053/481] Only slash active validators This is to prevent a spam/DoS attack where validators with zero balance get "slashed" but no validator loses any balance. --- specs/core/0_beacon-chain.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..2113472e3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2315,8 +2315,8 @@ def process_proposer_slashing(state: BeaconState, assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) # But the headers are different assert proposer_slashing.header_1 != proposer_slashing.header_2 - # Proposer is not yet slashed - assert proposer.slashed is False + # Proposer is active and not already slashed + assert is_active_validator(proposer) and proposer.slashed is False # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): assert bls_verify( @@ -2355,6 +2355,7 @@ def process_attester_slashing(state: BeaconState, index for index in attestation1.validator_indices if ( index in attestation2.validator_indices and + is_active_validator(state.validator_registry[index]) and state.validator_registry[index].slashed is False ) ] From 2b454d57f11d8e1bde78dd1aa83116df2b2417ee Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:08:17 +0000 Subject: [PATCH 054/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2113472e3..9ed620b83 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -59,6 +59,7 @@ - [`get_current_epoch`](#get_current_epoch) - [`get_epoch_start_slot`](#get_epoch_start_slot) - [`is_active_validator`](#is_active_validator) + - [`is_slashable_validator`](#is_slashable_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_permuted_index`](#get_permuted_index) - [`split`](#split) @@ -737,6 +738,18 @@ def is_active_validator(validator: Validator, epoch: Epoch) -> bool: return validator.activation_epoch <= epoch < validator.exit_epoch ``` +### `is_slashable_validator` +```python +def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is slashable. 
+ """ + return ( + validator.activation_epoch <= epoch < validator.withdrawable_epoch and + validator.slashed is False + ) +``` + ### `get_active_validator_indices` ```python @@ -2315,8 +2328,8 @@ def process_proposer_slashing(state: BeaconState, assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) # But the headers are different assert proposer_slashing.header_1 != proposer_slashing.header_2 - # Proposer is active and not already slashed - assert is_active_validator(proposer) and proposer.slashed is False + # Check proposer is slashable + assert is_slashable_validator(proposer) # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): assert bls_verify( @@ -2355,8 +2368,7 @@ def process_attester_slashing(state: BeaconState, index for index in attestation1.validator_indices if ( index in attestation2.validator_indices and - is_active_validator(state.validator_registry[index]) and - state.validator_registry[index].slashed is False + is_slashable_validator(state.validator_registry[index]) ) ] assert len(slashable_indices) >= 1 From 0c383ce4a1d4770bdb21975023a2ca7a3ef5f522 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:11:18 +0000 Subject: [PATCH 055/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9ed620b83..d377b8d45 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1412,7 +1412,6 @@ def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: Note that this function mutates ``state``. """ validator = state.validator_registry[index] - assert state.slot < get_epoch_start_slot(validator.withdrawable_epoch) # [TO BE REMOVED IN PHASE 2] exit_validator(state, index) state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) From e91036cfc9fbf9d05b03da0180ed5be95cc916ca Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 11:12:50 +0000 Subject: [PATCH 056/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d377b8d45..4a6170418 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2328,7 +2328,7 @@ def process_proposer_slashing(state: BeaconState, # But the headers are different assert proposer_slashing.header_1 != proposer_slashing.header_2 # Check proposer is slashable - assert is_slashable_validator(proposer) + assert is_slashable_validator(proposer, get_current_epoch(state)) # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): assert bls_verify( @@ -2367,7 +2367,7 @@ def process_attester_slashing(state: BeaconState, index for index in attestation1.validator_indices if ( index in attestation2.validator_indices and - is_slashable_validator(state.validator_registry[index]) + is_slashable_validator(state.validator_registry[index], get_current_epoch(state)) ) ] assert len(slashable_indices) >= 1 From 78f47f2069ec753c08dd7b278f7fc073b086cc34 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 12:23:17 +0000 Subject: [PATCH 057/481] Avoid underflow in voluntary exits --- specs/core/0_beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..be6a52e68 
100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2472,6 +2472,7 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch # Must have been in the validator set long enough + assert validator.activation_epoch != FAR_FUTURE_EPOCH assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature assert bls_verify( From dd39d25c86d812e7d5ac24e6bc5f043426e3617d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 19 Mar 2019 09:32:06 -0500 Subject: [PATCH 058/481] Replace committee exponential backoff with max progress Removes the mechanism that only rotates committees if blocks have been finalized and every shard has been crosslinked or at exponentially decreasing intervals, and replaces it with a rule that shard committees can only progress a maximum of 64 epochs at a time to preserve the invariant that maximum possible work required per epoch for a validator is O(1). --- specs/core/0_beacon-chain.md | 95 +++++++++++------------------------- 1 file changed, 28 insertions(+), 67 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a631bf2fc..6877b9358 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -185,8 +185,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `MAX_INDICES_PER_SLASHABLE_VOTE` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | +| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) 
+* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // EPOCH_LENGTH` ### Deposit contract @@ -598,12 +600,7 @@ The types are defined topologically to aid in facilitating an executable version # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'previous_shuffling_start_shard': 'uint64', 'current_shuffling_start_shard': 'uint64', - 'previous_shuffling_epoch': 'uint64', - 'current_shuffling_epoch': 'uint64', - 'previous_shuffling_seed': 'bytes32', - 'current_shuffling_seed': 'bytes32', # Finality 'previous_epoch_attestations': [PendingAttestation], @@ -849,7 +846,7 @@ def get_current_epoch_committee_count(state: BeaconState) -> int: """ current_active_validators = get_active_validator_indices( state.validator_registry, - state.current_shuffling_epoch, + get_current_epoch(state), ) return get_epoch_committee_count(len(current_active_validators)) ``` @@ -886,40 +883,30 @@ def get_crosslink_committees_at_slot(state: BeaconState, next_epoch = current_epoch + 1 assert previous_epoch <= epoch <= next_epoch + committees_per_epoch = get_epoch_committee_count(get_active_validator_indices( + state.validator_registry, + epoch, + )) if epoch == current_epoch: - committees_per_epoch = get_current_epoch_committee_count(state) - seed = state.current_shuffling_seed - shuffling_epoch = state.current_shuffling_epoch shuffling_start_shard = state.current_shuffling_start_shard elif epoch == previous_epoch: - committees_per_epoch = get_previous_epoch_committee_count(state) - seed = state.previous_shuffling_seed - shuffling_epoch = state.previous_shuffling_epoch - shuffling_start_shard = state.previous_shuffling_start_shard + shuffling_start_shard = ( + state.current_shuffling_start_shard - EPOCH_LENGTH * committees_per_epoch + ) % SHARD_COUNT elif epoch == next_epoch: - epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch - if registry_change: - committees_per_epoch = get_next_epoch_committee_count(state) - seed = generate_seed(state, next_epoch) - shuffling_epoch = next_epoch - current_committees_per_epoch = get_current_epoch_committee_count(state) - shuffling_start_shard = (state.current_shuffling_start_shard + current_committees_per_epoch) % SHARD_COUNT - elif epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update): - committees_per_epoch = get_next_epoch_committee_count(state) - seed = generate_seed(state, next_epoch) - shuffling_epoch = next_epoch - shuffling_start_shard = state.current_shuffling_start_shard - else: - committees_per_epoch = get_current_epoch_committee_count(state) - seed = state.current_shuffling_seed - shuffling_epoch = state.current_shuffling_epoch - shuffling_start_shard = state.current_shuffling_start_shard + current_epoch_committees = get_epoch_committee_count(get_active_validator_indices( + state.validator_registry, + current_epoch, + )) + shuffling_start_shard = ( + state.current_shuffling_start_shard + EPOCH_LENGTH * current_epoch_committees + ) % SHARD_COUNT shuffling = get_shuffling( - seed, + generate_seed(state, epoch), state.validator_registry, - shuffling_epoch, + epoch, ) offset = slot % SLOTS_PER_EPOCH committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH @@ -1529,12 +1516,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), - previous_shuffling_start_shard=GENESIS_START_SHARD, 
current_shuffling_start_shard=GENESIS_START_SHARD, - previous_shuffling_epoch=GENESIS_EPOCH, - current_shuffling_epoch=GENESIS_EPOCH, - previous_shuffling_seed=ZERO_HASH, - current_shuffling_seed=ZERO_HASH, # Finality previous_epoch_attestations=[], @@ -1574,7 +1556,6 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): state.latest_active_index_roots[index] = genesis_active_index_root - state.current_shuffling_seed = generate_seed(state, GENESIS_EPOCH) return state ``` @@ -1855,7 +1836,7 @@ def process_crosslinks(state: BeaconState) -> None: total_balance = get_total_balance(state, crosslink_committee) if 3 * participating_balance >= 2 * total_balance: state.latest_crosslinks[shard] = Crosslink( - epoch=slot_to_epoch(slot), + epoch=min(slot_to_epoch(slot), state.latest_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), crosslink_data_root=winning_root ) ``` @@ -2060,14 +2041,6 @@ def should_update_validator_registry(state: BeaconState) -> bool: # Must have finalized a new block if state.finalized_epoch <= state.validator_registry_update_epoch: return False - # Must have processed new crosslinks on all shards of the current epoch - shards_to_check = [ - (state.current_shuffling_start_shard + i) % SHARD_COUNT - for i in range(get_current_epoch_committee_count(state)) - ] - for shard in shards_to_check: - if state.latest_crosslinks[shard].epoch <= state.validator_registry_update_epoch: - return False return True ``` @@ -2119,30 +2092,17 @@ def update_validator_registry(state: BeaconState) -> None: Run the following function: ```python -def update_registry_and_shuffling_data(state: BeaconState) -> None: - # First set previous shuffling data to current shuffling data - state.previous_shuffling_epoch = state.current_shuffling_epoch - state.previous_shuffling_start_shard = state.current_shuffling_start_shard - state.previous_shuffling_seed = state.current_shuffling_seed +def update_registry(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = current_epoch + 1 # Check if we should update, and if so, update if should_update_validator_registry(state): update_validator_registry(state) - # If we update the registry, update the shuffling data and shards as well - state.current_shuffling_epoch = next_epoch - state.current_shuffling_start_shard = ( - state.current_shuffling_start_shard + - get_current_epoch_committee_count(state) % SHARD_COUNT - ) - state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) - else: - # If processing at least one crosslink keeps failing, then reshuffle every power of two, - # but don't update the current_shuffling_start_shard - epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch - if epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update): - state.current_shuffling_epoch = next_epoch - state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) + # If we update the registry, update the shuffling data 2/3 or and shards as well + state.current_shuffling_start_shard = ( + state.current_shuffling_start_shard + + get_current_epoch_committee_count(state) % SHARD_COUNT + ) ``` **Invariant**: the active index root that is hashed into the shuffling seed actually is the `hash_tree_root` of the validator set that is used for that epoch. 
@@ -2397,7 +2357,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # the attestation is trying to create Crosslink( crosslink_data_root=attestation.data.crosslink_data_root, - epoch=slot_to_epoch(attestation.data.slot) + epoch=min(slot_to_epoch(attestation.data.slot), + attestation.data.previous_crosslink.epoch + MAX_CROSSLINK_EPOCHS) ) } assert state.latest_crosslinks[attestation.data.shard] in acceptable_crosslink_data From db92235d9ed3eeffa846e50eef82895567cf77c7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 19 Mar 2019 09:34:37 -0500 Subject: [PATCH 059/481] Removed some no-longer-necessary functions --- specs/core/0_beacon-chain.md | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6877b9358..9e52148a0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -64,9 +64,7 @@ - [`split`](#split) - [`get_epoch_committee_count`](#get_epoch_committee_count) - [`get_shuffling`](#get_shuffling) - - [`get_previous_epoch_committee_count`](#get_previous_epoch_committee_count) - [`get_current_epoch_committee_count`](#get_current_epoch_committee_count) - - [`get_next_epoch_committee_count`](#get_next_epoch_committee_count) - [`get_crosslink_committees_at_slot`](#get_crosslink_committees_at_slot) - [`get_block_root`](#get_block_root) - [`get_state_root`](#get_state_root) @@ -823,20 +821,6 @@ def get_shuffling(seed: Bytes32, **Note**: this definition and the next few definitions make heavy use of repetitive computing. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. -### `get_previous_epoch_committee_count` - -```python -def get_previous_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the previous epoch of the given ``state``. - """ - previous_active_validators = get_active_validator_indices( - state.validator_registry, - state.previous_shuffling_epoch, - ) - return get_epoch_committee_count(len(previous_active_validators)) -``` - ### `get_current_epoch_committee_count` ```python @@ -851,20 +835,6 @@ def get_current_epoch_committee_count(state: BeaconState) -> int: return get_epoch_committee_count(len(current_active_validators)) ``` -### `get_next_epoch_committee_count` - -```python -def get_next_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the next epoch of the given ``state``. 
- """ - next_active_validators = get_active_validator_indices( - state.validator_registry, - get_current_epoch(state) + 1, - ) - return get_epoch_committee_count(len(next_active_validators)) -``` - ### `get_crosslink_committees_at_slot` ```python @@ -895,10 +865,7 @@ def get_crosslink_committees_at_slot(state: BeaconState, state.current_shuffling_start_shard - EPOCH_LENGTH * committees_per_epoch ) % SHARD_COUNT elif epoch == next_epoch: - current_epoch_committees = get_epoch_committee_count(get_active_validator_indices( - state.validator_registry, - current_epoch, - )) + current_epoch_committees = get_current_epoch_committee_count(state) shuffling_start_shard = ( state.current_shuffling_start_shard + EPOCH_LENGTH * current_epoch_committees ) % SHARD_COUNT From c5ee74d5e03376ec5c3bef1d294aaa9a3da831f6 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 19 Mar 2019 11:21:17 -0500 Subject: [PATCH 060/481] Justin fixes --- specs/core/0_beacon-chain.md | 37 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9e52148a0..c14ff9736 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -183,10 +183,8 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `MAX_INDICES_PER_SLASHABLE_VOTE` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | -| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) -* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // EPOCH_LENGTH` ### Deposit contract @@ -232,6 +230,10 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | +| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | + +* `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` + ### State list lengths @@ -598,7 +600,7 @@ The types are defined topologically to aid in facilitating an executable version # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'current_shuffling_start_shard': 'uint64', + 'latest_start_shard': 'uint64', # Finality 'previous_epoch_attestations': [PendingAttestation], @@ -859,15 +861,15 @@ def get_crosslink_committees_at_slot(state: BeaconState, )) if epoch == current_epoch: - shuffling_start_shard = state.current_shuffling_start_shard + shuffling_start_shard = state.latest_start_shard elif epoch == previous_epoch: shuffling_start_shard = ( - state.current_shuffling_start_shard - EPOCH_LENGTH * committees_per_epoch + state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch ) % SHARD_COUNT elif epoch == next_epoch: current_epoch_committees = get_current_epoch_committee_count(state) shuffling_start_shard = ( - state.current_shuffling_start_shard + EPOCH_LENGTH * current_epoch_committees + state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees ) % SHARD_COUNT shuffling = get_shuffling( @@ -1483,7 +1485,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), - current_shuffling_start_shard=GENESIS_START_SHARD, + latest_start_shard=GENESIS_START_SHARD, # Finality previous_epoch_attestations=[], @@ -2003,14 +2005,6 @@ def process_ejections(state: BeaconState) -> None: #### Validator registry and shuffling seed data -```python -def should_update_validator_registry(state: BeaconState) -> bool: - # Must have finalized a new block - if state.finalized_epoch <= state.validator_registry_update_epoch: - return False - return True -``` - ```python def update_validator_registry(state: BeaconState) -> None: """ @@ -2060,16 +2054,13 @@ Run the following function: ```python def update_registry(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - next_epoch = current_epoch + 1 # Check if we should update, and if so, update - if should_update_validator_registry(state): + if state.finalized_epoch > state.validator_registry_update_epoch: update_validator_registry(state) - # If we update the registry, update the shuffling data 2/3 or and shards as well - state.current_shuffling_start_shard = ( - state.current_shuffling_start_shard + - get_current_epoch_committee_count(state) % SHARD_COUNT - ) + state.latest_start_shard = ( + state.latest_start_shard + + get_current_epoch_committee_count(state) + ) % SHARD_COUNT ``` **Invariant**: the active index root that is hashed into the shuffling seed actually is the `hash_tree_root` of the validator set that is used for that epoch. 
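The committee patches above cap how far a shard's crosslink may advance in one step: a new crosslink records `min(slot_to_epoch(slot), previous.epoch + MAX_CROSSLINK_EPOCHS)`, which is what preserves the invariant that the maximum work required per epoch from a validator stays `O(1)` even for a shard that has gone a long time without a crosslink. Below is a minimal sketch of that clamp only (illustrative, not part of the diffs above; the constant value is taken from the constants table introduced in these patches, and the helper name `clamped_crosslink_epoch` is a hypothetical stand-in).

```python
MAX_CROSSLINK_EPOCHS = 2**6  # 64, as in the constants table introduced above


def clamped_crosslink_epoch(attestation_epoch: int, previous_crosslink_epoch: int) -> int:
    # A crosslink may advance at most MAX_CROSSLINK_EPOCHS past the shard's previous crosslink.
    return min(attestation_epoch, previous_crosslink_epoch + MAX_CROSSLINK_EPOCHS)


# A shard last crosslinked at epoch 100 that is next crosslinked at epoch 300
# only advances to epoch 164 in that step; the remainder catches up over later crosslinks.
assert clamped_crosslink_epoch(300, 100) == 164
assert clamped_crosslink_epoch(110, 100) == 110  # within the cap, no clamping
```
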
From ff165c197bc608eb897031231467917114f6fbdb Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 10:57:40 -0600 Subject: [PATCH 061/481] fix vector to be usable in deepcopy --- utils/phase0/minimal_ssz.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/utils/phase0/minimal_ssz.py b/utils/phase0/minimal_ssz.py index 08bd68357..c4828d08f 100644 --- a/utils/phase0/minimal_ssz.py +++ b/utils/phase0/minimal_ssz.py @@ -39,15 +39,22 @@ def SSZType(fields): return SSZObject -class Vector(list): - def __init__(self, x): - list.__init__(self, x) - self.length = len(x) +class Vector(): + def __init__(self, items): + self.items = items + self.length = len(items) - def append(*args): - raise Exception("Cannot change the length of a vector") + def __getitem__(self, key): + return self.items[key] - remove = clear = extend = pop = insert = append + def __setitem__(self, key, value): + self.items[key] = value + + def __iter__(self): + return iter(self.items) + + def __len__(self): + return self.length def is_basic(typ): From b50e148642d4a19d5517ab1ab689708b33ed7b53 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 17:13:25 +0000 Subject: [PATCH 062/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c14ff9736..07179aa0a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -74,7 +74,6 @@ - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`verify_merkle_branch`](#verify_merkle_branch) - [`get_attestation_participants`](#get_attestation_participants) - - [`is_power_of_two`](#is_power_of_two) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) - [`get_effective_balance`](#get_effective_balance) @@ -861,16 +860,12 @@ def get_crosslink_committees_at_slot(state: BeaconState, )) if epoch == current_epoch: - shuffling_start_shard = state.latest_start_shard + start_shard = state.latest_start_shard elif epoch == previous_epoch: - shuffling_start_shard = ( - state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch - ) % SHARD_COUNT + start_shard = (state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch) % SHARD_COUNT elif epoch == next_epoch: current_epoch_committees = get_current_epoch_committee_count(state) - shuffling_start_shard = ( - state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees - ) % SHARD_COUNT + start_shard = (state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees) % SHARD_COUNT shuffling = get_shuffling( generate_seed(state, epoch), @@ -879,7 +874,7 @@ def get_crosslink_committees_at_slot(state: BeaconState, ) offset = slot % SLOTS_PER_EPOCH committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH - slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT + slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT return [ ( @@ -1017,16 +1012,6 @@ def get_attestation_participants(state: BeaconState, return participants ``` -### `is_power_of_two` - -```python -def is_power_of_two(value: int) -> bool: - """ - Check if ``value`` is a power of two integer. - """ - return (value > 0) and (value & (value - 1) == 0) -``` - ### `int_to_bytes1`, `int_to_bytes2`, ... 
`int_to_bytes1(x): return x.to_bytes(1, 'little')`, `int_to_bytes2(x): return x.to_bytes(2, 'little')`, and so on for all integers, particularly 1, 2, 3, 4, 8, 32, 48, 96. From 009563b2c35c9c9fd352e8026d4b1ff4ff9d2e69 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:15:51 -0600 Subject: [PATCH 063/481] fix a few bugs in testing compute_committee --- scripts/phase0/build_spec.py | 23 ++++++++++++----------- specs/core/0_beacon-chain.md | 3 ++- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index ae5a5a4f2..6116f1ffe 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -37,22 +37,23 @@ Store = None code_lines += function_puller.get_lines(sourcefile) code_lines.append(""" -# Monkey patch validator shuffling cache -_get_shuffling = get_shuffling -shuffling_cache = {} -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: +# Monkey patch validator get committee code +_compute_committee = compute_committee +committee_cache = {} +def compute_committee(validator_indices: List[ValidatorIndex], + seed: Bytes32, + index: int, + total_committees: int) -> List[ValidatorIndex]: - param_hash = (seed, hash_tree_root(validators, [Validator]), epoch) + param_hash = (hash_tree_root(validator_indices), seed, index, total_committees) - if param_hash in shuffling_cache: + if param_hash in committee_cache: # print("Cache hit, epoch={0}".format(epoch)) - return shuffling_cache[param_hash] + return committee_cache[param_hash] else: # print("Cache miss, epoch={0}".format(epoch)) - ret = _get_shuffling(seed, validators, epoch) - shuffling_cache[param_hash] = ret + ret = _compute_committee(validator_indices, seed, index, total_committees) + committee_cache[param_hash] = ret return ret diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9708ee591..27ae71c00 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -916,7 +916,8 @@ def get_crosslink_committees_at_slot(state: BeaconState, indices = get_active_validator_indices(state.validator_registry, shuffling_epoch) committee_count = get_epoch_committee_count(len(indices)) - committees_per_slot = committee_count // EPOCH_LENGTH + committees_per_slot = committee_count // SLOTS_PER_EPOCH + offset = slot % SLOTS_PER_EPOCH return [ ( compute_committee(indices, seed, committees_per_slot * offset + i, committee_count) From c8e9073414114cad7b276eb6623457ed8fb1bf86 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:24:36 -0600 Subject: [PATCH 064/481] define get_split_offset and squash a couple of bugs --- specs/core/0_beacon-chain.md | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 27ae71c00..b56a87ad5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -61,7 +61,7 @@ - [`is_active_validator`](#is_active_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_permuted_index`](#get_permuted_index) - - [`split`](#split) + - [`get_split_offset`](#get_split_offset) - [`get_epoch_committee_count`](#get_epoch_committee_count) - [`compute_committee`](#compute_committee) - [`get_previous_epoch_committee_count`](#get_previous_epoch_committee_count) @@ -773,18 +773,11 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: return index ``` -### `split` +### 
`get_split_offset` ```python -def split(values: List[Any], split_count: int) -> List[List[Any]]: - """ - Splits ``values`` into ``split_count`` pieces. - """ - list_length = len(values) - return [ - values[(list_length * i // split_count): (list_length * (i + 1) // split_count)] - for i in range(split_count) - ] +def get_split_offset(list_length: int, split_count: int, index: int) -> int: + return (list_length * index) // split_count ``` ### `get_epoch_committee_count` @@ -918,9 +911,11 @@ def get_crosslink_committees_at_slot(state: BeaconState, committee_count = get_epoch_committee_count(len(indices)) committees_per_slot = committee_count // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH + slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT + return [ ( - compute_committee(indices, seed, committees_per_slot * offset + i, committee_count) + compute_committee(indices, seed, committees_per_slot * offset + i, committee_count), (slot_start_shard + i) % SHARD_COUNT, ) for i in range(committees_per_slot) From f5826e7f1ce5e46358873514d3a1c9d173fe55aa Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:34:49 -0600 Subject: [PATCH 065/481] small lint --- README.md | 7 ++++++- specs/core/0_beacon-chain.md | 1 - 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c5c88daf9..8f561a9ab 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -# Ethereum 2.0 Specifications +Ethereum 2.0 Specifications +------------ [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) @@ -25,3 +26,7 @@ The following are the broad design goals for Ethereum 2.0: * to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available * to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) + +# Executable spec + +The aim is to have the entirety of the Ethereum 2.0Current just the phase 0 spec is executable. 
diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b56a87ad5..e21607010 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -920,7 +920,6 @@ def get_crosslink_committees_at_slot(state: BeaconState, ) for i in range(committees_per_slot) ] - ``` ### `get_block_root` From f7fab30772b6d70c4a2f84acc171a0432575394e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:40:24 -0600 Subject: [PATCH 066/481] minor adjustment to not repeat committe count calc --- specs/core/0_beacon-chain.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e21607010..9563e22bb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -908,14 +908,13 @@ def get_crosslink_committees_at_slot(state: BeaconState, shuffling_start_shard = state.current_shuffling_start_shard indices = get_active_validator_indices(state.validator_registry, shuffling_epoch) - committee_count = get_epoch_committee_count(len(indices)) - committees_per_slot = committee_count // SLOTS_PER_EPOCH + committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % SHARD_COUNT return [ ( - compute_committee(indices, seed, committees_per_slot * offset + i, committee_count), + compute_committee(indices, seed, committees_per_slot * offset + i, committees_per_epoch), (slot_start_shard + i) % SHARD_COUNT, ) for i in range(committees_per_slot) From ba57d91e7a31b0dab328c7582dd1159b85fdf5d7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 11:41:27 -0600 Subject: [PATCH 067/481] undo readme commit --- README.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index 8f561a9ab..c5c88daf9 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -Ethereum 2.0 Specifications ------------- +# Ethereum 2.0 Specifications [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) @@ -26,7 +25,3 @@ The following are the broad design goals for Ethereum 2.0: * to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available * to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) - -# Executable spec - -The aim is to have the entirety of the Ethereum 2.0Current just the phase 0 spec is executable. 
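The `get_split_offset` helper introduced a few patches above (PATCH 064) replaces the old `split` routine: rather than materializing every partition, committee boundaries are computed on demand as `(list_length * index) // split_count`. The sketch below shows how those offsets carve a list into contiguous, near-equal slices (illustrative only; `slice_for` is a hypothetical wrapper, not part of the spec).

```python
def get_split_offset(list_length: int, split_count: int, index: int) -> int:
    # Start offset of the index-th of split_count contiguous, near-equal partitions.
    return (list_length * index) // split_count


def slice_for(values: list, split_count: int, index: int) -> list:
    # Hypothetical convenience wrapper: the index-th partition of ``values``.
    start = get_split_offset(len(values), split_count, index)
    end = get_split_offset(len(values), split_count, index + 1)
    return values[start:end]


# Ten items split three ways yield sizes 3, 3, 4 and cover the list exactly once.
items = list(range(10))
parts = [slice_for(items, 3, i) for i in range(3)]
assert [len(p) for p in parts] == [3, 3, 4]
assert sum(parts, []) == items
```
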
From 3f46010fa8836b68c7d03aca330ec238865583ad Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 12:29:06 -0600 Subject: [PATCH 068/481] modify validator ejecion test to fit PR --- tests/phase0/test_sanity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 8f04f316c..d1811cd00 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -433,7 +433,7 @@ def test_ejection(state): block.slot += spec.SLOTS_PER_EPOCH state_transition(post_state, block) - assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH + assert post_state.validator_registry[validator_index].initiated_exit == True return pre_state, [block], post_state From 23ef802da5426e0c573417f79f37ecac8500b0b3 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 13:25:34 -0600 Subject: [PATCH 069/481] fix small bug in sytax --- specs/core/0_beacon-chain.md | 2 +- tests/phase0/test_sanity.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index aa9fc1e7f..a834a1cde 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2089,7 +2089,7 @@ def update_validator_registry(state: BeaconState) -> None: activate_validator(state, index, is_genesis=False) # Exit validators within the allowable balance churn - if state.current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: + if current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: balance_churn = ( state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index d1811cd00..56c1c1a64 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -316,12 +316,18 @@ def test_attestation(state, pubkeys, privkeys): def test_voluntary_exit(state, pubkeys, privkeys): pre_state = deepcopy(state) - validator_index = get_active_validator_indices(pre_state.validator_registry, get_current_epoch(pre_state))[-1] + validator_index = get_active_validator_indices( + pre_state.validator_registry, + get_current_epoch(pre_state) + )[-1] # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH # artificially trigger registry update at next epoch transition - pre_state.validator_registry_update_epoch -= 1 + pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 + for crosslink in pre_state.latest_crosslinks: + crosslink.epoch = pre_state.finalized_epoch + pre_state.validator_registry_update_epoch = pre_state.finalized_epoch - 1 post_state = deepcopy(pre_state) From 24f1139d0938d2360f724cc4de7c1c23160b0157 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 13:39:45 -0600 Subject: [PATCH 070/481] add explicit test that ensures exists are blocked when too long since registry change --- tests/phase0/test_sanity.py | 38 +++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 56c1c1a64..8c7e7d28b 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -369,6 +369,44 @@ def test_voluntary_exit(state, pubkeys, privkeys): return pre_state, [initiate_exit_block, exit_block], 
post_state +def test_no_exit_too_long_since_change(state): + pre_state = deepcopy(state) + validator_index = get_active_validator_indices( + pre_state.validator_registry, + get_current_epoch(pre_state) + )[-1] + + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + # artificially trigger registry update at next epoch transition + pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 + for crosslink in pre_state.latest_crosslinks: + crosslink.epoch = pre_state.finalized_epoch + # make epochs since registry update greater than LATEST_SLASHED_EXIT_LENGTH + pre_state.validator_registry_update_epoch = ( + get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH + ) + # set validator to have previously initiated exit + pre_state.validator_registry[validator_index].initiated_exit = True + + post_state = deepcopy(pre_state) + + # + # Process registry change but ensure no exit + # + block = build_empty_block_for_next_slot(post_state) + block.slot += spec.SLOTS_PER_EPOCH + state_transition(post_state, block) + + assert post_state.validator_registry_update_epoch == get_current_epoch(post_state) - 1 + assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + + return pre_state, [block], post_state + + def test_transfer(state, pubkeys, privkeys): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) From b664453a342d88b20a351a90e4aac7ce5a901fa5 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 19 Mar 2019 20:43:05 +0000 Subject: [PATCH 071/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 940343e51..910646487 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2435,8 +2435,9 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: assert validator.initiated_exit is False # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch - # Must have been in the validator set long enough - assert validator.activation_epoch != FAR_FUTURE_EPOCH + # Verify the validator is active + assert is_active_validator(validator, state) + # Verify the validator has been active long enough assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature assert bls_verify( @@ -2445,7 +2446,7 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: signature=exit.signature, domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) ) - # Run the exit + # Initiate exit initiate_validator_exit(state, exit.validator_index) ``` From ad636a8252f83a5e1be2714c3ad3fc5a299da4ed Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 15:00:29 -0600 Subject: [PATCH 072/481] add no salshed proposer test --- tests/phase0/test_process_block_header.py | 26 +++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 tests/phase0/test_process_block_header.py diff --git a/tests/phase0/test_process_block_header.py b/tests/phase0/test_process_block_header.py new file mode 100644 index 000000000..83d99e574 --- /dev/null +++ b/tests/phase0/test_process_block_header.py @@ -0,0 +1,26 @@ +from copy import deepcopy +import pytest + + +from build.phase0.spec import ( + get_beacon_proposer_index, 
+ process_block_header, +) +from tests.phase0.helpers import ( + build_empty_block_for_next_slot, +) + +# mark entire file as 'sanity' and 'header' +pytestmark = [pytest.mark.sanity, pytest.mark.header] + + +def test_proposer_slashed(state): + pre_state = deepcopy(state) + + block = build_empty_block_for_next_slot(pre_state) + proposer_index = get_beacon_proposer_index(pre_state, block.slot) + pre_state.validator_registry[proposer_index].slashed = True + with pytest.raises(AssertionError): + process_block_header(pre_state, block) + + return state, [block], None From acd7fdd762b19a3758e4fadd481f672b7843d32b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 15:49:01 -0600 Subject: [PATCH 073/481] add a few voluntary exit tests --- Makefile | 2 +- specs/core/0_beacon-chain.md | 4 +- .../test_process_block_header.py | 4 +- .../block_processing/test_voluntary_exit.py | 170 ++++++++++++++++++ tests/phase0/conftest.py | 6 + tests/phase0/helpers.py | 28 +++ tests/phase0/test_sanity.py | 12 +- 7 files changed, 213 insertions(+), 13 deletions(-) rename tests/phase0/{ => block_processing}/test_process_block_header.py (85%) create mode 100644 tests/phase0/block_processing/test_voluntary_exit.py diff --git a/Makefile b/Makefile index b45cec410..88f17dcf9 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ clean: # runs a limited set of tests against a minimal config # run pytest with `-m` option to full suite test: - pytest -m "sanity and minimal_config" tests/ + pytest -m minimal_config tests/ $(BUILD_DIR)/phase0: diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 85e35d595..212cedb95 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2431,14 +2431,14 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: Note that this function mutates ``state``. 
""" validator = state.validator_registry[exit.validator_index] + # Verify the validator is active + assert is_active_validator(validator, get_current_epoch(state)) # Verify the validator has not yet exited assert validator.exit_epoch == FAR_FUTURE_EPOCH # Verify the validator has not initiated an exit assert validator.initiated_exit is False # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch - # Verify the validator is active - assert is_active_validator(validator, state) # Verify the validator has been active long enough assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature diff --git a/tests/phase0/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py similarity index 85% rename from tests/phase0/test_process_block_header.py rename to tests/phase0/block_processing/test_process_block_header.py index 83d99e574..4ec7e336f 100644 --- a/tests/phase0/test_process_block_header.py +++ b/tests/phase0/block_processing/test_process_block_header.py @@ -10,8 +10,8 @@ from tests.phase0.helpers import ( build_empty_block_for_next_slot, ) -# mark entire file as 'sanity' and 'header' -pytestmark = [pytest.mark.sanity, pytest.mark.header] +# mark entire file as 'header' +pytestmark = pytest.mark.header def test_proposer_slashed(state): diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py new file mode 100644 index 000000000..80fad86a1 --- /dev/null +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -0,0 +1,170 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec + +from build.phase0.spec import ( + get_active_validator_indices, + get_current_epoch, + process_voluntary_exit, +) +from tests.phase0.helpers import ( + build_voluntary_exit, +) + + +def test_success(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + # + # build voluntary exit + # + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + post_state = deepcopy(pre_state) + + # + # test valid exit + # + process_voluntary_exit(post_state, voluntary_exit) + + assert not pre_state.validator_registry[validator_index].initiated_exit + assert post_state.validator_registry[validator_index].initiated_exit + + return pre_state, voluntary_exit, post_state + + +def test_validator_not_active(state, pub_to_priv): + pre_state = deepcopy(state) + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # + # setup pre_state + # + pre_state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH + + # + # build and test voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, 
voluntary_exit, None + + +def test_validator_already_exited(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # but validator already has exited + pre_state.validator_registry[validator_index].exit_epoch = current_epoch + 2 + + # + # build voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, voluntary_exit, None + + +def test_validator_already_initiated_exit(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit + pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # but validator already has initiated exit + pre_state.validator_registry[validator_index].initiated_exit = True + + # + # build voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, voluntary_exit, None + + +def test_validator_not_active_long_enough(state, pub_to_priv): + pre_state = deepcopy(state) + # + # setup pre_state + # + current_epoch = get_current_epoch(pre_state) + validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + + # but validator already has initiated exit + pre_state.validator_registry[validator_index].initiated_exit = True + + # + # build voluntary exit + # + voluntary_exit = build_voluntary_exit( + pre_state, + current_epoch, + validator_index, + privkey, + ) + + assert ( + current_epoch - pre_state.validator_registry[validator_index].activation_epoch < + spec.PERSISTENT_COMMITTEE_PERIOD + ) + + with pytest.raises(AssertionError): + process_voluntary_exit(pre_state, voluntary_exit) + + return pre_state, voluntary_exit, None diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index e92896e92..395929028 100644 --- a/tests/phase0/conftest.py +++ b/tests/phase0/conftest.py @@ -5,6 +5,7 @@ from build.phase0 import spec from tests.phase0.helpers import ( privkeys_list, pubkeys_list, + pubkey_to_privkey, create_genesis_state, ) @@ -34,6 +35,11 @@ def pubkeys(): return pubkeys_list +@pytest.fixture +def pub_to_priv(): + return pubkey_to_privkey + + def overwrite_spec_config(config): for field in config: setattr(spec, field, config[field]) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 510361e9c..2c7994079 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -13,6 +13,7 @@ from build.phase0.spec import ( DepositInput, DepositData, Eth1Data, + VoluntaryExit, # functions get_block_root, get_current_epoch, @@ -82,6 
+83,14 @@ def create_genesis_state(num_validators, deposit_data_leaves): ) +def force_registry_change_at_next_epoch(state): + # artificially trigger registry update at next epoch transition + state.finalized_epoch = get_current_epoch(state) - 1 + for crosslink in state.latest_crosslinks: + crosslink.epoch = state.finalized_epoch + state.validator_registry_update_epoch = state.finalized_epoch - 1 + + def build_empty_block_for_next_slot(state): empty_block = get_empty_block() empty_block.slot = state.slot + 1 @@ -143,3 +152,22 @@ def build_attestation_data(state, slot, shard): crosslink_data_root=spec.ZERO_HASH, previous_crosslink=deepcopy(state.latest_crosslinks[shard]), ) + + +def build_voluntary_exit(state, epoch, validator_index, privkey): + voluntary_exit = VoluntaryExit( + epoch=epoch, + validator_index=validator_index, + signature=EMPTY_SIGNATURE, + ) + voluntary_exit.signature = bls.sign( + message_hash=signed_root(voluntary_exit), + privkey=privkey, + domain=get_domain( + fork=state.fork, + epoch=epoch, + domain_type=spec.DOMAIN_VOLUNTARY_EXIT, + ) + ) + + return voluntary_exit diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 8c7e7d28b..b9d44a72c 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -43,6 +43,7 @@ from tests.phase0.helpers import ( build_attestation_data, build_deposit_data, build_empty_block_for_next_slot, + force_registry_change_at_next_epoch, ) @@ -324,10 +325,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH # artificially trigger registry update at next epoch transition - pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 - for crosslink in pre_state.latest_crosslinks: - crosslink.epoch = pre_state.finalized_epoch - pre_state.validator_registry_update_epoch = pre_state.finalized_epoch - 1 + force_registry_change_at_next_epoch(pre_state) post_state = deepcopy(pre_state) @@ -369,7 +367,7 @@ def test_voluntary_exit(state, pubkeys, privkeys): return pre_state, [initiate_exit_block, exit_block], post_state -def test_no_exit_too_long_since_change(state): +def test_no_exit_churn_too_long_since_change(state): pre_state = deepcopy(state) validator_index = get_active_validator_indices( pre_state.validator_registry, @@ -382,9 +380,7 @@ def test_no_exit_too_long_since_change(state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH # artificially trigger registry update at next epoch transition - pre_state.finalized_epoch = get_current_epoch(pre_state) - 1 - for crosslink in pre_state.latest_crosslinks: - crosslink.epoch = pre_state.finalized_epoch + force_registry_change_at_next_epoch(pre_state) # make epochs since registry update greater than LATEST_SLASHED_EXIT_LENGTH pre_state.validator_registry_update_epoch = ( get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH From 472d9c5c20a93c0b1608013c03f5ca92a0a9a1d8 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Tue, 19 Mar 2019 15:32:38 -0700 Subject: [PATCH 074/481] Updates from review --- specs/networking/messaging.md | 2 ++ specs/networking/rpc-interface.md | 24 +++++++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/specs/networking/messaging.md b/specs/networking/messaging.md index de92fe6d4..b64e1d5d8 100644 --- a/specs/networking/messaging.md +++ 
b/specs/networking/messaging.md @@ -11,6 +11,8 @@ The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the ETH 2.0 specification evolves. +Note that while `libp2p` is the chosen networking stack for Ethereum 2.0, as of this writing some clients do not have workable `libp2p` implementations. To allow those clients to communicate, we define a message envelope that includes the body's compression, encoding, and body length. Once `libp2p` is available across all implementations, this message envelope will be removed because `libp2p` will negotiate the values defined in the envelope upfront. + # Specification ## Message Structure diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index f505a4663..ef85f32d5 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -34,7 +34,9 @@ A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp ## RPC-Over-`libp2p` -To facilitate RPC-over-`libp2p`, a single protocol path is used: `/eth/serenity/beacon/rpc/1.0.0`. Remote method calls are wrapped in a "request" structure: +To facilitate RPC-over-`libp2p`, a single protocol name is used: `/eth/serenity/beacon/rpc/1`. The version number in the protocol name is neither backwards or forwards compatible, and will be incremented whenever changes to the below structures are required. + +Remote method calls are wrapped in a "request" structure: ``` ( @@ -88,6 +90,10 @@ The first 1,000 values in `error.code` are reserved for system use. The followin 3. `20`: Method not found. 4. `30`: Server error. +### Alternative for Non-`libp2p` Clients + +Since some clients are waiting for `libp2p` implementations in their respective languages. As such, they MAY listen for raw TCP messages on port `9000`. To distinguish RPC messages from other messages on that port, a byte prefix of `ETH` (`0x455448`) MUST be prepended to all messages. This option will be removed once `libp2p` is ready in all supported languages. + ## Messages ### Hello @@ -154,12 +160,13 @@ Once the handshake completes, the client with the higher `latest_finalized_epoch ) ``` -Client MAY send `goodbye` messages upon disconnection. The reason field MUST be one of the following values: +Client MAY send `goodbye` messages upon disconnection. The reason field MAY be one of the following values: - `1`: Client shut down. - `2`: Irrelevant network. -- `3`: Too many peers. -- `4`: Fault/error. +- `3`: Fault/error. + +Clients MAY define custom goodbye reasons as long as the value is larger than `1000`. ### Request Beacon Block Roots @@ -168,7 +175,10 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be **Request Body** ``` -() +( + start_slot: uint64 + count: uint64 +) ``` **Response Body:** @@ -185,7 +195,7 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MUST be ) ``` -Send a list of block roots and slots to the requesting peer. +Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`. ### Beacon Block Headers @@ -210,7 +220,7 @@ Send a list of block roots and slots to the requesting peer. ) ``` -Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. 
For example, requesting blocks starting at slots `2` a `skip_slots` value of `2` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. +Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks. The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. From cf4f3463a9aecb7052a0c6d196421f0483e35c75 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 17:10:36 -0600 Subject: [PATCH 075/481] add deposit size check in state transiton. add deposit tests --- .../block_processing/test_process_deposit.py | 132 ++++++++++++++++++ .../block_processing/test_voluntary_exit.py | 4 + tests/phase0/helpers.py | 30 +++- tests/phase0/test_sanity.py | 2 + utils/phase0/state_transition.py | 13 ++ 5 files changed, 180 insertions(+), 1 deletion(-) create mode 100644 tests/phase0/block_processing/test_process_deposit.py diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py new file mode 100644 index 000000000..297ad37f1 --- /dev/null +++ b/tests/phase0/block_processing/test_process_deposit.py @@ -0,0 +1,132 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec + +from build.phase0.spec import ( + Deposit, + process_deposit, +) +from tests.phase0.helpers import ( + build_deposit, +) + + +# mark entire file as 'voluntary_exits' +pytestmark = pytest.mark.voluntary_exits + + +def test_success(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + index = len(deposit_data_leaves) + pubkey = pubkeys[index] + privkey = privkeys[index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + spec.MAX_DEPOSIT_AMOUNT, + ) + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + post_state = deepcopy(pre_state) + + process_deposit(post_state, deposit) + + assert len(post_state.validator_registry) == len(state.validator_registry) + 1 + assert len(post_state.validator_balances) == len(state.validator_balances) + 1 + assert post_state.validator_registry[index].pubkey == pubkeys[index] + assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count + + return 
pre_state, deposit, post_state + + +def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + validator_index = 0 + amount = spec.MAX_DEPOSIT_AMOUNT // 4 + pubkey = pubkeys[validator_index] + privkey = privkeys[validator_index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + amount, + ) + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + pre_balance = pre_state.validator_balances[validator_index] + + post_state = deepcopy(pre_state) + + process_deposit(post_state, deposit) + + assert len(post_state.validator_registry) == len(state.validator_registry) + assert len(post_state.validator_balances) == len(state.validator_balances) + assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count + assert post_state.validator_balances[validator_index] == pre_balance + amount + + return pre_state, deposit, post_state + + +def test_wrong_index(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + index = len(deposit_data_leaves) + pubkey = pubkeys[index] + privkey = privkeys[index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + spec.MAX_DEPOSIT_AMOUNT, + ) + + # mess up deposit_index + deposit.index = pre_state.deposit_index + 1 + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + post_state = deepcopy(pre_state) + + with pytest.raises(AssertionError): + process_deposit(post_state, deposit) + + return pre_state, deposit, None + + +def test_bad_merkle_proof(state, deposit_data_leaves, pubkeys, privkeys): + pre_state = deepcopy(state) + + index = len(deposit_data_leaves) + pubkey = pubkeys[index] + privkey = privkeys[index] + deposit, root, deposit_data_leaves = build_deposit( + pre_state, + deposit_data_leaves, + pubkey, + privkey, + spec.MAX_DEPOSIT_AMOUNT, + ) + + # mess up merkle branch + deposit.proof[-1] = spec.ZERO_HASH + + pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + post_state = deepcopy(pre_state) + + with pytest.raises(AssertionError): + process_deposit(post_state, deposit) + + return pre_state, deposit, None diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index 80fad86a1..0801e4292 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -13,6 +13,10 @@ from tests.phase0.helpers import ( ) +# mark entire file as 'voluntary_exits' +pytestmark = pytest.mark.voluntary_exits + + def test_success(state, pub_to_priv): pre_state = deepcopy(state) # diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 2c7994079..5c61685a6 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -72,12 +72,16 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): def create_genesis_state(num_validators, deposit_data_leaves): - initial_deposits, deposit_root = create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves) + initial_deposits, deposit_root = create_mock_genesis_validator_deposits( + num_validators, + deposit_data_leaves, + ) return get_genesis_beacon_state( initial_deposits, genesis_time=0, genesis_eth1_data=Eth1Data( deposit_root=deposit_root, + 
deposit_count=len(initial_deposits), block_hash=spec.ZERO_HASH, ), ) @@ -171,3 +175,27 @@ def build_voluntary_exit(state, epoch, validator_index, privkey): ) return voluntary_exit + + +def build_deposit(state, + deposit_data_leaves, + pubkey, + privkey, + amount): + deposit_data = build_deposit_data(state, pubkey, privkey, amount) + + item = hash(deposit_data.serialize()) + index = len(deposit_data_leaves) + deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + root = get_merkle_root((tuple(deposit_data_leaves))) + proof = list(get_merkle_proof(tree, item_index=index)) + assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) + + deposit = Deposit( + proof=list(proof), + index=index, + deposit_data=deposit_data, + ) + + return deposit, root, deposit_data_leaves diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index b9d44a72c..91bd9fe7a 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -196,6 +196,7 @@ def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): ) pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(test_deposit_data_leaves) post_state = deepcopy(pre_state) block = build_empty_block_for_next_slot(post_state) block.body.deposits.append(deposit) @@ -233,6 +234,7 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): ) pre_state.latest_eth1_data.deposit_root = root + pre_state.latest_eth1_data.deposit_count = len(test_deposit_data_leaves) block = build_empty_block_for_next_slot(pre_state) block.body.deposits.append(deposit) diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py index eefc3d409..88c4f934a 100644 --- a/utils/phase0/state_transition.py +++ b/utils/phase0/state_transition.py @@ -15,6 +15,13 @@ from .spec import ( ) +def expected_deposit_count(state: BeaconState) -> int: + return min( + spec.MAX_DEPOSITS, + state.latest_eth1_data.deposit_count - state.deposit_index + ) + + def process_transaction_type(state: BeaconState, transactions: List[Any], max_transactions: int, @@ -31,30 +38,36 @@ def process_transactions(state: BeaconState, block: BeaconBlock) -> None: spec.MAX_PROPOSER_SLASHINGS, spec.process_proposer_slashing, ) + process_transaction_type( state, block.body.attester_slashings, spec.MAX_ATTESTER_SLASHINGS, spec.process_attester_slashing, ) + process_transaction_type( state, block.body.attestations, spec.MAX_ATTESTATIONS, spec.process_attestation, ) + + assert len(block.body.deposits) == expected_deposit_count(state) process_transaction_type( state, block.body.deposits, spec.MAX_DEPOSITS, spec.process_deposit, ) + process_transaction_type( state, block.body.voluntary_exits, spec.MAX_VOLUNTARY_EXITS, spec.process_voluntary_exit, ) + assert len(block.body.transfers) == len(set(block.body.transfers)) process_transaction_type( state, From 1083de0c616aacb78e66363e53eeabac4a1f8f5e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 19 Mar 2019 17:38:09 -0600 Subject: [PATCH 076/481] add notes about mandatory deposits in validator guide --- specs/validator/0_beacon-chain-validator.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 1a4bddf9e..62a7011b4 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -212,25 +212,25 @@ block_signature = bls_sign( ##### 
Proposer slashings -Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposer-slashings-1). The validator receives a small "whistleblower" reward for each proposer slashing found and included. +Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included. ##### Attester slashings -Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attester-slashings-1). The validator receives a small "whistleblower" reward for each attester slashing found and included. +Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included. ##### Attestations -Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations-1). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. +Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. ##### Deposits -Up to `MAX_DEPOSITS` [`Deposit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) objects can be included in the `block`. These deposits are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. 
The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits-1). +If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits). The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. ##### Voluntary exits -Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#exits-1). +Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntary-exits). ### Attestations From 833691b8afe9ca68c75588e0f528780f200de0ee Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 20 Mar 2019 08:16:39 +0000 Subject: [PATCH 077/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4935ab7d7..099d12b95 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -256,7 +256,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | * The `BASE_REWARD_QUOTIENT` parameter dictates the per-epoch reward. It corresponds to ~2.54% annual interest assuming 10 million participating ETH in every epoch. -* The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. 
Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1-1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1-1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. +* The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. ### Max transactions per block @@ -789,7 +789,7 @@ def decrease_balance(state: BeaconState, index: int, delta: int) -> None: ```python def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: """ - Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. + Return `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy. Utilizes 'swap or not' shuffling found in https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf @@ -1376,7 +1376,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) state.balances.append(0) - set_balance(state, len(state.validator_registry)-1, amount) + set_balance(state, len(state.validator_registry) - 1, amount) else: # Increase balance by deposit amount index = validator_pubkeys.index(pubkey) From dde49cbedafa4301f33dea56ad0830d866ae5a57 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 20 Mar 2019 08:47:41 -0600 Subject: [PATCH 078/481] fix and extend header tests --- .../test_process_block_header.py | 90 ++++++++++++++++++- 1 file changed, 86 insertions(+), 4 deletions(-) diff --git a/tests/phase0/block_processing/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py index 4ec7e336f..650bc387c 100644 --- a/tests/phase0/block_processing/test_process_block_header.py +++ b/tests/phase0/block_processing/test_process_block_header.py @@ -4,6 +4,8 @@ import pytest from build.phase0.spec import ( get_beacon_proposer_index, + cache_state, + advance_slot, process_block_header, ) from tests.phase0.helpers import ( @@ -14,13 +16,93 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.header -def test_proposer_slashed(state): +def test_sucess(state): + pre_state = deepcopy(state) + block = build_empty_block_for_next_slot(pre_state) + + # + # setup pre_state to be ready for block transition + # + cache_state(pre_state) + advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # + process_block_header(post_state, block) + + return state, [block], post_state + + +def test_invalid_slot(state): pre_state = deepcopy(state) + # mess up previous block root block = build_empty_block_for_next_slot(pre_state) - proposer_index = get_beacon_proposer_index(pre_state, block.slot) - pre_state.validator_registry[proposer_index].slashed = True + block.previous_block_root = b'\12'*32 + + # + # setup pre_state advancing two slots to induce error + # + cache_state(pre_state) + advance_slot(pre_state) + 
advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # with pytest.raises(AssertionError): - process_block_header(pre_state, block) + process_block_header(post_state, block) + + return state, [block], None + + +def test_invalid_previous_block_root(state): + pre_state = deepcopy(state) + + # mess up previous block root + block = build_empty_block_for_next_slot(pre_state) + block.previous_block_root = b'\12'*32 + + # + # setup pre_state to be ready for block transition + # + cache_state(pre_state) + advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # + with pytest.raises(AssertionError): + process_block_header(post_state, block) + + return state, [block], None + + +def test_proposer_slashed(state): + pre_state = deepcopy(state) + proposer_index = get_beacon_proposer_index(pre_state, pre_state.slot + 1) + pre_state.validator_registry[proposer_index].slashed = True + block = build_empty_block_for_next_slot(pre_state) + + # + # setup pre_state to be ready for block transition + # + cache_state(pre_state) + advance_slot(pre_state) + + post_state = deepcopy(pre_state) + + # + # test block header + # + with pytest.raises(AssertionError): + process_block_header(post_state, block) return state, [block], None From f4012ee309dfa5b238bd55e05acd0c041e8c9280 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 20 Mar 2019 09:59:29 -0600 Subject: [PATCH 079/481] make process block header tests more modular --- .../test_process_block_header.py | 107 ++++++------------ 1 file changed, 35 insertions(+), 72 deletions(-) diff --git a/tests/phase0/block_processing/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py index 650bc387c..4981b656c 100644 --- a/tests/phase0/block_processing/test_process_block_header.py +++ b/tests/phase0/block_processing/test_process_block_header.py @@ -16,93 +16,56 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.header -def test_sucess(state): - pre_state = deepcopy(state) - block = build_empty_block_for_next_slot(pre_state) +def prepare_state_for_header_processing(state): + cache_state(state) + advance_slot(state) - # - # setup pre_state to be ready for block transition - # - cache_state(pre_state) - advance_slot(pre_state) - post_state = deepcopy(pre_state) +def run_block_header_processing(state, block, valid=True): + """ + Run ``process_block_header`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + prepare_state_for_header_processing(state) + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_block_header(post_state, block) + return state, None - # - # test block header - # process_block_header(post_state, block) + return state, post_state - return state, [block], post_state + +def test_success(state): + block = build_empty_block_for_next_slot(state) + pre_state, post_state = run_block_header_processing(state, block) + return state, block, post_state def test_invalid_slot(state): - pre_state = deepcopy(state) + block = build_empty_block_for_next_slot(state) + block.slot = state.slot + 2 # invalid slot - # mess up previous block root - block = build_empty_block_for_next_slot(pre_state) - block.previous_block_root = b'\12'*32 - - # - # setup pre_state advancing two slots to induce error - # - cache_state(pre_state) - advance_slot(pre_state) - advance_slot(pre_state) - - post_state = deepcopy(pre_state) - - # - # test block header - # - with pytest.raises(AssertionError): - process_block_header(post_state, block) - - return state, [block], None + pre_state, post_state = run_block_header_processing(state, block, valid=False) + return pre_state, block, None def test_invalid_previous_block_root(state): - pre_state = deepcopy(state) + block = build_empty_block_for_next_slot(state) + block.previous_block_root = b'\12'*32 # invalid prev root - # mess up previous block root - block = build_empty_block_for_next_slot(pre_state) - block.previous_block_root = b'\12'*32 - - # - # setup pre_state to be ready for block transition - # - cache_state(pre_state) - advance_slot(pre_state) - - post_state = deepcopy(pre_state) - - # - # test block header - # - with pytest.raises(AssertionError): - process_block_header(post_state, block) - - return state, [block], None + pre_state, post_state = run_block_header_processing(state, block, valid=False) + return pre_state, block, None def test_proposer_slashed(state): - pre_state = deepcopy(state) - proposer_index = get_beacon_proposer_index(pre_state, pre_state.slot + 1) - pre_state.validator_registry[proposer_index].slashed = True - block = build_empty_block_for_next_slot(pre_state) + # set proposer to slashed + proposer_index = get_beacon_proposer_index(state, state.slot + 1) + state.validator_registry[proposer_index].slashed = True - # - # setup pre_state to be ready for block transition - # - cache_state(pre_state) - advance_slot(pre_state) + block = build_empty_block_for_next_slot(state) - post_state = deepcopy(pre_state) - - # - # test block header - # - with pytest.raises(AssertionError): - process_block_header(post_state, block) - - return state, [block], None + pre_state, post_state = run_block_header_processing(state, block, valid=False) + return pre_state, block, None From ced6208d55d26d63f532d4bb031869740b2a111c Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 20 Mar 2019 11:49:28 -0500 Subject: [PATCH 080/481] Edits * shuffled committee -> period committee * Reduced code redundancy --- specs/light_client/sync_protocol.md | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 2a420abcb..a8cdd50c2 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -37,25 +37,15 @@ We add a data type `PeriodData` and four helpers: ```python def get_earlier_start_epoch(slot: Slot) -> int: return slot 
- slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD * 2 + def get_later_start_epoch(slot: Slot) -> int: return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD -def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: - period_start = get_earlier_start_epoch(header.slot) +def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) -> PeriodData: + period_start = get_later_start_epoch(header.slot) if later else get_earlier_start_epoch(header.slot) validator_count = len(get_active_validator_indices(state, period_start)) committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 - indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) - return PeriodData( - validator_count, - generate_seed(block.state, period_start), - [block.state.validator_registry[i] for i in indices] - ) - -def get_later_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: - period_start = get_later_start_epoch(header.slot) - validator_count = len(get_active_validator_indices(state, period_start)) - committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 - indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) + indices = get_period_committee(block.state, shard_id, period_start, 0, committee_count) return PeriodData( validator_count, generate_seed(block.state, period_start), @@ -69,18 +59,18 @@ A light client will keep track of: * A random `shard_id` in `[0...SHARD_COUNT-1]` (selected once and retained forever) * A block header that they consider to be finalized (`finalized_header`) and do not expect to revert. -* `later_period_data = get_later_period_data(finalized_header, shard_id)` -* `earlier_period_data = get_earlier_period_data(finalized_header, shard_id)` +* `later_period_data = get_period_data(finalized_header, shard_id, later=True)` +* `earlier_period_data = get_period_data(finalized_header, shard_id, later=False)` We use the struct `validator_memory` to keep track of these variables. ### Updating the shuffled committee -If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_later_period_data, validator_memory.finalized_header, shard_id)`. It can then compute: +If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_period_data, validator_memory.finalized_header, shard_id, later=True)`. It can then compute: ```python earlier_period_data = later_period_data -later_period_data = get_later_period_data(new_committee_proof, finalized_header, shard_id) +later_period_data = get_period_data(new_committee_proof, finalized_header, shard_id, later=True) ``` The maximum size of a proof is `128 * ((22-7) * 32 + 110) = 75520` bytes for validator records and `(22-7) * 32 + 128 * 8 = 1504` for the active index proof (much smaller because the relevant active indices are all beside each other in the Merkle tree). This needs to be done once per `PERSISTENT_COMMITTEE_PERIOD` epochs (2048 epochs / 9 days), or ~38 bytes per epoch. 
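The proof-size figures quoted above can be cross-checked directly. The interpretation of the constants — a depth-22 Merkle tree with 7 levels already known to the client, 110-byte validator records, 128-member committees and 8-byte indices — is inferred from the formula rather than spelled out in the text, so the breakdown below is a reading of that arithmetic, not an additional specification.

```python
# Re-derives the 75,520 / 1,504 byte figures and the per-epoch amortization.
TREE_DEPTH = 22                      # assumed: full depth of the validator registry tree
KNOWN_DEPTH = 7                      # assumed: levels the client already has cached
HASH_SIZE = 32                       # bytes per sibling hash in a Merkle branch
RECORD_SIZE = 110                    # bytes per validator record, as stated
COMMITTEE_SIZE = 128
PERSISTENT_COMMITTEE_PERIOD = 2048   # epochs between committee updates

branch = (TREE_DEPTH - KNOWN_DEPTH) * HASH_SIZE            # 480 bytes per Merkle branch
validator_proof = COMMITTEE_SIZE * (branch + RECORD_SIZE)  # 75,520 bytes of validator records
index_proof = branch + COMMITTEE_SIZE * 8                  # 1,504 bytes (8-byte indices)

assert (validator_proof, index_proof) == (75520, 1504)
per_epoch = (validator_proof + index_proof) / PERSISTENT_COMMITTEE_PERIOD
print(f"{per_epoch:.1f} bytes of proof data per epoch")    # ~37.6, i.e. the ~38 quoted above
```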
From 8794d03517ea2b6160f032d6619fe01594f2a645 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Wed, 20 Mar 2019 19:04:04 -0700 Subject: [PATCH 081/481] Updates with Whiteblock --- specs/networking/rpc-interface.md | 59 ++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index ef85f32d5..51dc3a900 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -51,8 +51,8 @@ and their corresponding responses are wrapped in a "response" structure: ``` ( id: uint64 - is_error: boolean - result: Response + response_code: uint16 + result: bytes ) ``` @@ -61,11 +61,8 @@ If an error occurs, a variant of the response structure is returned: ``` ( id: uint64 - is_error: boolean - result: ( - code: uint16 - data: bytes - ) + response_code: uint16 + result: bytes ) ``` @@ -75,20 +72,21 @@ The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](http 2. The `id` member in the response MUST be the same as the value of the `id` in the request. 3. The `id` member MUST be unique within the context of a single connection. Monotonically increasing `id`s are RECOMMENDED. 4. The `method_id` member is REQUIRED. -5. The `result` member is required on success, and MUST NOT exist if there was an error. -6. The `error` member is REQUIRED on errors, and MUST NOT exist if there wasn't an error. -7. `is_error` MUST be `true` on errors, or `false` otherwise. +5. The `result` member is REQUIRED on success. +6. The `result` member is OPTIONAL on errors, and MAY contain additional information about the error. +7. `response_code` MUST be `0` on success. Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. Note that this implies that responses MAY arrive in a different order than requests. The "method ID" fields in the below messages refer to the `method` field in the request structure above. -The first 1,000 values in `error.code` are reserved for system use. The following error codes are predefined: +The first 1,000 values in `response_code` are reserved for system use. The following response codes are predefined: -1. `0`: Parse error. -2. `10`: Invalid request. -3. `20`: Method not found. -4. `30`: Server error. +1. `0`: No error. +2. `10`: Parse error. +2. `20`: Invalid request. +3. `30`: Method not found. +4. `40`: Server error. ### Alternative for Non-`libp2p` Clients @@ -105,6 +103,7 @@ Since some clients are waiting for `libp2p` implementations in their respective ``` ( network_id: uint8 + chain_id: uint8 latest_finalized_root: bytes32 latest_finalized_epoch: uint64 best_root: bytes32 @@ -168,6 +167,32 @@ Client MAY send `goodbye` messages upon disconnection. The reason field MAY be o Clients MAY define custom goodbye reasons as long as the value is larger than `1000`. +### Get Status + +**Method ID:** `2` + +**Request Body:** + +``` +( + sha: bytes32 + user_agent: bytes + timestamp: uint64 +) +``` + +**Response Body:** + +``` +( + sha: bytes32 + user_agent: bytes + timestamp: uint64 +) +``` + +Returns metadata about the remote node. + ### Request Beacon Block Roots **Method ID:** `10` @@ -195,7 +220,7 @@ Clients MAY define custom goodbye reasons as long as the value is larger than `1 ) ``` -Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`. +Requests a list of block roots and slots from the peer. 
The `count` parameter MUST be less than or equal to `32768`. The slots MUST be returned in ascending slot order. ### Beacon Block Headers @@ -216,7 +241,7 @@ Requests a list of block roots and slots from the peer. The `count` parameter MU ``` ( - headers: []BlockHeader + headers: []BeaconBlockHeader ) ``` From fdcfc910080f283f62926954150a47ffb681224e Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 07:38:25 -0500 Subject: [PATCH 082/481] Add docstring into get_split_offset --- specs/core/0_beacon-chain.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d32aa1a0e..e198f5c35 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -779,6 +779,10 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: ```python def get_split_offset(list_length: int, split_count: int, index: int) -> int: + """ + Returns a value such that for a list L, chunk count k and index i, + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i + 1)] + """ return (list_length * index) // split_count ``` From fd6d80fcb648a397fc43644199bf78d267d8a988 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 21 Mar 2019 08:24:26 -0600 Subject: [PATCH 083/481] remove get_split_offset from phase 1 doc --- specs/core/0_beacon-chain.md | 12 ++++++------ specs/core/1_shard-data-chains.md | 12 ------------ 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e198f5c35..1067c3dc0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -778,12 +778,12 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: ### `get_split_offset` ```python -def get_split_offset(list_length: int, split_count: int, index: int) -> int: - """ - Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i + 1)] - """ - return (list_length * index) // split_count +def get_split_offset(list_size: int, chunks: int, index: int) -> int: + """ + Returns a value such that for a list L, chunk count k and index i, + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] + """ + return (list_size * index) // chunks ``` ### `get_epoch_committee_count` diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index c76f9ba08..92cee4d19 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -19,7 +19,6 @@ At the current stage, Phase 1, while fundamentally feature-complete, is still su - [Signature domains](#signature-domains) - [Shard chains and crosslink data](#shard-chains-and-crosslink-data) - [Helper functions](#helper-functions) - - [`get_split_offset`](#get_split_offset) - [`get_shuffled_committee`](#get_shuffled_committee) - [`get_persistent_committee`](#get_persistent_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) @@ -122,17 +121,6 @@ Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md ## Helper functions -#### `get_split_offset` - -````python -def get_split_offset(list_size: int, chunks: int, index: int) -> int: - """ - Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] - """ - return (list_size * index) // chunks -```` - #### `get_shuffled_committee` ```python 
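# A self-contained sketch checking the contract promised by the get_split_offset
# docstring retained in the Phase 0 spec above:
#     split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i + 1)]
# i.e. the k offsets partition L into contiguous, balanced chunks.

def get_split_offset(list_size: int, chunks: int, index: int) -> int:
    # Body as given in the Phase 0 diff above.
    return (list_size * index) // chunks

def check_split_offsets(list_size: int, chunks: int) -> None:
    offsets = [get_split_offset(list_size, chunks, i) for i in range(chunks + 1)]
    assert offsets[0] == 0 and offsets[-1] == list_size      # chunks cover the whole list
    sizes = [b - a for a, b in zip(offsets, offsets[1:])]
    assert all(size >= 0 for size in sizes)                  # offsets never go backwards
    assert max(sizes) - min(sizes) <= 1                      # chunk sizes differ by at most one

for n, k in [(100, 16), (7, 3), (5, 8), (64, 64)]:
    check_split_offsets(n, k)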
From 47477b8e55dba85f7e4e12c3b0cf99bc594ac81d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 21 Mar 2019 09:37:06 -0600 Subject: [PATCH 084/481] cleanup tests to use get_balance and set_balance --- .../block_processing/test_process_deposit.py | 10 ++++---- tests/phase0/test_sanity.py | 23 +++++++++++-------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py index 297ad37f1..9f1b6add6 100644 --- a/tests/phase0/block_processing/test_process_deposit.py +++ b/tests/phase0/block_processing/test_process_deposit.py @@ -5,6 +5,7 @@ import build.phase0.spec as spec from build.phase0.spec import ( Deposit, + get_balance, process_deposit, ) from tests.phase0.helpers import ( @@ -38,8 +39,9 @@ def test_success(state, deposit_data_leaves, pubkeys, privkeys): process_deposit(post_state, deposit) assert len(post_state.validator_registry) == len(state.validator_registry) + 1 - assert len(post_state.validator_balances) == len(state.validator_balances) + 1 + assert len(post_state.balances) == len(state.balances) + 1 assert post_state.validator_registry[index].pubkey == pubkeys[index] + assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count return pre_state, deposit, post_state @@ -62,16 +64,16 @@ def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): pre_state.latest_eth1_data.deposit_root = root pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - pre_balance = pre_state.validator_balances[validator_index] + pre_balance = get_balance(pre_state, validator_index) post_state = deepcopy(pre_state) process_deposit(post_state, deposit) assert len(post_state.validator_registry) == len(state.validator_registry) - assert len(post_state.validator_balances) == len(state.validator_balances) + assert len(post_state.balances) == len(state.balances) assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count - assert post_state.validator_balances[validator_index] == pre_balance + amount + assert get_balance(post_state, validator_index) == pre_balance + amount return pre_state, deposit, post_state diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 91bd9fe7a..ec03fb355 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -21,6 +21,7 @@ from build.phase0.spec import ( # functions get_active_validator_indices, get_attestation_participants, + get_balance, get_block_root, get_crosslink_committees_at_slot, get_current_epoch, @@ -28,6 +29,7 @@ from build.phase0.spec import ( get_state_root, advance_slot, cache_state, + set_balance, verify_merkle_branch, hash, ) @@ -168,7 +170,7 @@ def test_proposer_slashing(state, pubkeys, privkeys): assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH # lost whistleblower reward - assert test_state.validator_balances[validator_index] < state.validator_balances[validator_index] + assert get_balance(test_state, validator_index) < get_balance(state, validator_index) return state, [block], test_state @@ -203,7 +205,8 @@ def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): state_transition(post_state, block) assert len(post_state.validator_registry) == len(state.validator_registry) + 1 - assert len(post_state.validator_balances) == len(state.validator_balances) + 1 + assert 
len(post_state.balances) == len(state.balances) + 1 + assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT assert post_state.validator_registry[index].pubkey == pubkeys[index] return pre_state, [block], post_state @@ -238,12 +241,12 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): block = build_empty_block_for_next_slot(pre_state) block.body.deposits.append(deposit) - pre_balance = pre_state.validator_balances[validator_index] + pre_balance = get_balance(pre_state, validator_index) post_state = deepcopy(pre_state) state_transition(post_state, block) assert len(post_state.validator_registry) == len(pre_state.validator_registry) - assert len(post_state.validator_balances) == len(pre_state.validator_balances) - assert post_state.validator_balances[validator_index] == pre_balance + amount + assert len(post_state.balances) == len(pre_state.balances) + assert get_balance(post_state, validator_index) == pre_balance + amount return pre_state, [block], post_state @@ -412,8 +415,8 @@ def test_transfer(state, pubkeys, privkeys): recipient_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] transfer_pubkey = pubkeys[-1] transfer_privkey = privkeys[-1] - amount = pre_state.validator_balances[sender_index] - pre_transfer_recipient_balance = pre_state.validator_balances[recipient_index] + amount = get_balance(pre_state, sender_index) + pre_transfer_recipient_balance = get_balance(pre_state, recipient_index) transfer = Transfer( sender=sender_index, recipient=recipient_index, @@ -448,8 +451,8 @@ def test_transfer(state, pubkeys, privkeys): block.body.transfers.append(transfer) state_transition(post_state, block) - sender_balance = post_state.validator_balances[sender_index] - recipient_balance = post_state.validator_balances[recipient_index] + sender_balance = get_balance(post_state, sender_index) + recipient_balance = get_balance(post_state, recipient_index) assert sender_balance == 0 assert recipient_balance == pre_transfer_recipient_balance + amount @@ -465,7 +468,7 @@ def test_ejection(state): assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH # set validator balance to below ejection threshold - pre_state.validator_balances[validator_index] = spec.EJECTION_BALANCE - 1 + set_balance(pre_state, validator_index, spec.EJECTION_BALANCE - 1) post_state = deepcopy(pre_state) # From f6da42ffb32fed8e22769dbf77f906889b1e02a2 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 21 Mar 2019 10:04:20 -0600 Subject: [PATCH 085/481] fix markdown issues --- specs/core/0_beacon-chain.md | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3bc95f717..4eee3dcb5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -61,9 +61,9 @@ - [`is_active_validator`](#is_active_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - [`get_balance`](#get_balance) - - [`set_balance`](#set_balance) - - [`increase_balance`](#increase_balance) - - [`decrease_balance`](#decrease_balance) + - [`set_balance`](#set_balance) + - [`increase_balance`](#increase_balance) + - [`decrease_balance`](#decrease_balance) - [`get_permuted_index`](#get_permuted_index) - [`get_split_offset`](#get_split_offset) - [`get_epoch_committee_count`](#get_epoch_committee_count) @@ -760,31 +760,32 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L def get_balance(state: 
BeaconState, index: int) -> int: return state.balances[index] ``` -#### `set_balance` -````python +### `set_balance` + +```python def set_balance(state: BeaconState, index: int, balance: int) -> None: validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT state.balances[index] = balance -```` +``` -#### `increase_balance` +### `increase_balance` -````python +```python def increase_balance(state: BeaconState, index: int, delta: int) -> None: set_balance(state, index, get_balance(state, index) + delta) -```` +``` -#### `decrease_balance` +### `decrease_balance` -````python +```python def decrease_balance(state: BeaconState, index: int, delta: int) -> None: cur_balance = get_balance(state, index) set_balance(state, index, cur_balance - delta if cur_balance >= delta else 0) -```` +``` ### `get_permuted_index` From d3f175d7289befde23d9810a29ee1ca40f02216a Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 21 Mar 2019 11:33:36 -0700 Subject: [PATCH 086/481] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 8878545bb..575df60fe 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -1,6 +1,18 @@ -# Beacon chain light client syncing +# Beacon Chain Light Client Syncing + +__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. + +## Table of Contents + + +- [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing) + - [Table of Contents](#table-of-contents) + - [Light client state](#light-client-state) + - [Updating the shuffled committee](#updating-the-shuffled-committee) + - [Computing the current committee](#computing-the-current-committee) + - [Verifying blocks](#verifying-blocks) + -One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. ### Preliminaries From d1d1b73fb1783b564556a48ea86d69d8dd1003e7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 15:11:05 -0500 Subject: [PATCH 087/481] Simplify justification and finalization accounting logic Much of the simplification is cosmetic. 
The following changes are substantive: * Inactivity leak penalty specifically on missing the target, not both the target and the source * Even outside of quadratic leak scenarios, slashing victims suffer offline penalties --- specs/core/0_beacon-chain.md | 94 ++++++++---------------------------- 1 file changed, 21 insertions(+), 73 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1067c3dc0..e2cd8b162 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1883,10 +1883,11 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: ```python def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: - return ( - get_base_reward(state, index) + - get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 - ) + if epochs_since_finality <= 4: + extra_penalty = 0 + else: + extra_penalty = get_effective_balance(state, index) * min(epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 + return get_base_reward(state, index) + extra_penalty ``` Note: When applying penalties in the following balance recalculations implementers should make sure the `uint64` does not underflow. @@ -1896,22 +1897,8 @@ Note: When applying penalties in the following balance recalculations implemente ```python def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch - if epochs_since_finality <= 4: - return compute_normal_justification_and_finalization_deltas(state) - else: - return compute_inactivity_leak_deltas(state) -``` - -When blocks are finalizing normally... - -```python -def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] + rewards = [0 for index in range(len(state.validator_registry))] + penalties = [0 for index in range(len(state.validator_registry))] # Some helper variables boundary_attestations = get_previous_epoch_boundary_attestations(state) boundary_attesting_balance = get_attesting_balance(state, boundary_attestations) @@ -1919,76 +1906,37 @@ def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> total_attesting_balance = get_attesting_balance(state, state.previous_epoch_attestations) matching_head_attestations = get_previous_epoch_matching_head_attestations(state) matching_head_balance = get_attesting_balance(state, matching_head_attestations) + eligible_validators = [ + i for i,v in enumerate(state.validator_registry) if is_active_validator(v, get_current_epoch(state)) or + (v.slashed and get_current_epoch(state) < v.withdrawable_epoch) + ] # Process rewards or penalties for all validators - for index in get_active_validator_indices(state.validator_registry, get_previous_epoch(state)): + for index in eligible_validators: # Expected FFG source if index in get_attesting_indices(state, state.previous_epoch_attestations): - deltas[0][index] += get_base_reward(state, index) * total_attesting_balance // total_balance + rewards[index] += get_base_reward(state, index) * total_attesting_balance // total_balance # Inclusion speed bonus - deltas[0][index] += ( + rewards[index] += ( get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // 
inclusion_distance(state, index) ) else: - deltas[1][index] += get_base_reward(state, index) + penalties[index] += get_base_reward(state, index) # Expected FFG target if index in get_attesting_indices(state, boundary_attestations): - deltas[0][index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance + rewards[index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance else: - deltas[1][index] += get_base_reward(state, index) + penalties[index] += get_inactivity_penalty(state, index, epochs_since_finality) # Expected head if index in get_attesting_indices(state, matching_head_attestations): - deltas[0][index] += get_base_reward(state, index) * matching_head_balance // total_balance + rewards[index] += get_base_reward(state, index) * matching_head_balance // total_balance else: - deltas[1][index] += get_base_reward(state, index) + penalties[index] += get_base_reward(state, index) # Proposer bonus if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - deltas[0][proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT - return deltas -``` - -When blocks are not finalizing normally... - -```python -def compute_inactivity_leak_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] - boundary_attestations = get_previous_epoch_boundary_attestations(state) - matching_head_attestations = get_previous_epoch_matching_head_attestations(state) - active_validator_indices = get_active_validator_indices(state.validator_registry, get_previous_epoch(state)) - epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch - for index in active_validator_indices: - if index not in get_attesting_indices(state, state.previous_epoch_attestations): - deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) - else: - # If a validator did attest, apply a small penalty for getting attestations included late - deltas[0][index] += ( - get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // - inclusion_distance(state, index) - ) - deltas[1][index] += get_base_reward(state, index) - if index not in get_attesting_indices(state, boundary_attestations): - deltas[1][index] += get_inactivity_penalty(state, index, epochs_since_finality) - if index not in get_attesting_indices(state, matching_head_attestations): - deltas[1][index] += get_base_reward(state, index) - # Penalize slashed-but-inactive validators as though they were active but offline - for index in range(len(state.validator_registry)): - eligible = ( - index not in active_validator_indices and - state.validator_registry[index].slashed and - get_current_epoch(state) < state.validator_registry[index].withdrawable_epoch - ) - if eligible: - deltas[1][index] += ( - 2 * get_inactivity_penalty(state, index, epochs_since_finality) + - get_base_reward(state, index) - ) - return deltas + rewards[proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + return [rewards, penalties] ``` ##### Crosslinks From 38a5c3640b30581a4e807ae6aba13e7266bd1a76 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 15:13:13 -0500 Subject: [PATCH 088/481] Re-added some penalization in case of failure to finalize --- 
specs/core/0_beacon-chain.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e2cd8b162..2a7b0c776 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1936,6 +1936,9 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) rewards[proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + # Take away max rewards if we're not finalizing + if epochs_since_finality > 4: + penalties[index] += get_base_reward(state, index) * 4 return [rewards, penalties] ``` From 3b403909e8d1571bc6a30ac9487d2ba49a9386cd Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 16:29:14 -0500 Subject: [PATCH 089/481] Cosmetic improvement to reward/penalty functions --- specs/core/0_beacon-chain.md | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1067c3dc0..05dda9fe7 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1995,12 +1995,8 @@ def compute_inactivity_leak_deltas(state: BeaconState) -> Tuple[List[Gwei], List ```python def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - # deltas[0] for rewards - # deltas[1] for penalties - deltas = [ - [0 for index in range(len(state.validator_registry))], - [0 for index in range(len(state.validator_registry))] - ] + rewards = [0 for index in range(len(state.validator_registry))] + penalties = [0 for index in range(len(state.validator_registry))] previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) for slot in range(previous_epoch_start_slot, current_epoch_start_slot): @@ -2010,10 +2006,10 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: total_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: if index in participants: - deltas[0][index] += get_base_reward(state, index) * participating_balance // total_balance + rewards[index] += get_base_reward(state, index) * participating_balance // total_balance else: - deltas[1][index] += get_base_reward(state, index) - return deltas + penalties[index] += get_base_reward(state, index) + return [rewards, penalties] ``` #### Apply rewards @@ -2022,12 +2018,12 @@ Run the following: ```python def apply_rewards(state: BeaconState) -> None: - deltas1 = get_justification_and_finalization_deltas(state) - deltas2 = get_crosslink_deltas(state) + rewards1, penalties1 = get_justification_and_finalization_deltas(state) + rewards2, penalties2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): state.validator_balances[i] = max( 0, - state.validator_balances[i] + deltas1[0][i] + deltas2[0][i] - deltas1[1][i] - deltas2[1][i] + state.validator_balances[i] + rewards1[i] + rewards2[i] - penalties1[i] - penalties2[i] ) ``` From 3ece05ccc1a5e126e934c57aa091386a4afeb8ef Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 21 Mar 2019 16:36:31 -0500 Subject: [PATCH 090/481] Small cosmetic change to slashable attestations --- specs/core/0_beacon-chain.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 
1067c3dc0..c7c74279f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -182,7 +182,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_BALANCE_CHURN_QUOTIENT` | `2**5` (= 32) | -| `MAX_INDICES_PER_SLASHABLE_VOTE` | `2**12` (= 4,096) | +| `MAX_SLASHABLE_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | @@ -1159,7 +1159,7 @@ def verify_slashable_attestation(state: BeaconState, slashable_attestation: Slas if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] return False - if len(slashable_attestation.validator_indices) == 0: + if not (1 <= len(slashable_attestation.validator_indices) <= MAX_SLASHABLE_ATTESTATION_PARTICIPANTS): return False for i in range(len(slashable_attestation.validator_indices) - 1): @@ -1169,9 +1169,6 @@ def verify_slashable_attestation(state: BeaconState, slashable_attestation: Slas if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)): return False - if len(slashable_attestation.validator_indices) > MAX_INDICES_PER_SLASHABLE_VOTE: - return False - custody_bit_0_indices = [] custody_bit_1_indices = [] for i, validator_index in enumerate(slashable_attestation.validator_indices): From e313c5ba5abd949e2af87919a36f84937ee6b68c Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 21 Mar 2019 17:08:54 -0600 Subject: [PATCH 091/481] add tests for proposer slashing --- .../test_process_proposer_slashing.py | 97 +++++++++++++++++++ tests/phase0/helpers.py | 44 +++++++++ tests/phase0/test_sanity.py | 39 +------- 3 files changed, 144 insertions(+), 36 deletions(-) create mode 100644 tests/phase0/block_processing/test_process_proposer_slashing.py diff --git a/tests/phase0/block_processing/test_process_proposer_slashing.py b/tests/phase0/block_processing/test_process_proposer_slashing.py new file mode 100644 index 000000000..467d2164b --- /dev/null +++ b/tests/phase0/block_processing/test_process_proposer_slashing.py @@ -0,0 +1,97 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec +from build.phase0.spec import ( + get_balance, + get_current_epoch, + process_proposer_slashing, +) +from tests.phase0.helpers import ( + get_valid_proposer_slashing, +) + +# mark entire file as 'header' +pytestmark = pytest.mark.proposer_slashings + + +def run_proposer_slashing_processing(state, proposer_slashing, valid=True): + """ + Run ``process_proposer_slashing`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_proposer_slashing(post_state, proposer_slashing) + return state, None + + process_proposer_slashing(post_state, proposer_slashing) + + slashed_validator = post_state.validator_registry[proposer_slashing.proposer_index] + assert not slashed_validator.initiated_exit + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + # lost whistleblower reward + assert ( + get_balance(post_state, proposer_slashing.proposer_index) < + get_balance(state, proposer_slashing.proposer_index) + ) + + return state, post_state + + +def test_success(state): + proposer_slashing = get_valid_proposer_slashing(state) + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing) + + return pre_state, proposer_slashing, post_state + + +def test_epochs_are_different(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set slots to be in different epochs + proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state + + +def test_headers_are_same(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set headers to be the same + proposer_slashing.header_2 = proposer_slashing.header_1 + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state + + +def test_proposer_is_slashed(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set proposer to slashed + state.validator_registry[proposer_slashing.proposer_index].slashed = True + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state + + +def test_proposer_is_withdrawn(state): + proposer_slashing = get_valid_proposer_slashing(state) + + # set proposer withdrawable_epoch in past + current_epoch = get_current_epoch(state) + proposer_index = proposer_slashing.proposer_index + state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1 + + pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False) + + return pre_state, proposer_slashing, post_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 5c61685a6..3987289bf 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -7,14 +7,18 @@ from build.phase0.utils.minimal_ssz import signed_root from build.phase0.spec import ( # constants EMPTY_SIGNATURE, + ZERO_HASH, # SSZ AttestationData, + BeaconBlockHeader, Deposit, DepositInput, DepositData, Eth1Data, + ProposerSlashing, VoluntaryExit, # functions + get_active_validator_indices, get_block_root, get_current_epoch, get_domain, @@ -199,3 +203,43 @@ def build_deposit(state, ) return deposit, root, deposit_data_leaves + + +def get_valid_proposer_slashing(state): + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[-1] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] + slot = state.slot + + header_1 = BeaconBlockHeader( + slot=slot, + previous_block_root=ZERO_HASH, + state_root=ZERO_HASH, + block_body_root=ZERO_HASH, + 
signature=EMPTY_SIGNATURE, + ) + header_2 = deepcopy(header_1) + header_2.previous_block_root = b'\x02' * 32 + header_2.slot = slot + 1 + + domain = get_domain( + fork=state.fork, + epoch=get_current_epoch(state), + domain_type=spec.DOMAIN_BEACON_BLOCK, + ) + header_1.signature = bls.sign( + message_hash=signed_root(header_1), + privkey=privkey, + domain=domain, + ) + header_2.signature = bls.sign( + message_hash=signed_root(header_2), + privkey=privkey, + domain=domain, + ) + + return ProposerSlashing( + proposer_index=validator_index, + header_1=header_1, + header_2=header_2, + ) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index ec03fb355..444075a13 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -46,6 +46,7 @@ from tests.phase0.helpers import ( build_deposit_data, build_empty_block_for_next_slot, force_registry_change_at_next_epoch, + get_valid_proposer_slashing, ) @@ -117,42 +118,8 @@ def test_empty_epoch_transition_not_finalizing(state): def test_proposer_slashing(state, pubkeys, privkeys): test_state = deepcopy(state) - current_epoch = get_current_epoch(test_state) - validator_index = get_active_validator_indices(test_state.validator_registry, current_epoch)[-1] - privkey = privkeys[validator_index] - slot = spec.GENESIS_SLOT - header_1 = BeaconBlockHeader( - slot=slot, - previous_block_root=ZERO_HASH, - state_root=ZERO_HASH, - block_body_root=ZERO_HASH, - signature=EMPTY_SIGNATURE, - ) - header_2 = deepcopy(header_1) - header_2.previous_block_root = b'\x02' * 32 - header_2.slot = slot + 1 - - domain = get_domain( - fork=test_state.fork, - epoch=get_current_epoch(test_state), - domain_type=spec.DOMAIN_BEACON_BLOCK, - ) - header_1.signature = bls.sign( - message_hash=signed_root(header_1), - privkey=privkey, - domain=domain, - ) - header_2.signature = bls.sign( - message_hash=signed_root(header_2), - privkey=privkey, - domain=domain, - ) - - proposer_slashing = ProposerSlashing( - proposer_index=validator_index, - header_1=header_1, - header_2=header_2, - ) + proposer_slashing = get_valid_proposer_slashing(state) + validator_index = proposer_slashing.proposer_index # # Add to state via block transition From 11c3291817f3d590723298877c41d0bf244b789b Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 21 Mar 2019 16:30:45 -0700 Subject: [PATCH 092/481] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 8878545bb..143f82a39 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -40,8 +40,8 @@ def get_later_start_epoch(slot: Slot) -> int: return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: - period_start = get_earlier_start_epoch(header.slot) - validator_count = len(get_active_validator_indices(state, period_start)) + period_start = get_earlier_start_epoch(block.slot) + validator_count = len(get_active_validator_indices(block.state, period_start)) committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) return PeriodData( @@ -51,8 +51,8 @@ def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> Peri ) def get_later_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData: 
- period_start = get_later_start_epoch(header.slot) - validator_count = len(get_active_validator_indices(state, period_start)) + period_start = get_later_start_epoch(block.slot) + validator_count = len(get_active_validator_indices(block.state, period_start)) committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1 indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count) return PeriodData( From ae67e9513b46e045b87bdc302f6b20c0fc341e2f Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 22 Mar 2019 12:56:54 +0800 Subject: [PATCH 093/481] Fix type hinting and add docstrings --- specs/core/0_beacon-chain.md | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a67e6291c..c29aa113d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -770,14 +770,21 @@ def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> L ### `get_balance` ```python -def get_balance(state: BeaconState, index: int) -> int: +def get_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: + """ + Return the balance for a validator with the given ``index``. + """ return state.balances[index] ``` ### `set_balance` ```python -def set_balance(state: BeaconState, index: int, balance: int) -> None: +def set_balance(state: BeaconState, index: ValidatorIndex, balance: Gwei) -> None: + """ + Set the balance for a validator with the given ``index`` in both ``BeaconState`` + and validator's rounded balance ``high_balance``. + """ validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: @@ -788,16 +795,23 @@ def set_balance(state: BeaconState, index: int, balance: int) -> None: ### `increase_balance` ```python -def increase_balance(state: BeaconState, index: int, delta: int) -> None: +def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Increase the balance for a validator with the given ``index`` by ``delta``. + """ set_balance(state, index, get_balance(state, index) + delta) ``` ### `decrease_balance` ```python -def decrease_balance(state: BeaconState, index: int, delta: int) -> None: - cur_balance = get_balance(state, index) - set_balance(state, index, cur_balance - delta if cur_balance >= delta else 0) +def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Decrease the balance for a validator with the given ``index`` by ``delta``. + Set to ``0`` when underflow. 
+ """ + current_balance = get_balance(state, index) + set_balance(state, index, current_balance - delta if current_balance >= delta else 0) ``` ### `get_permuted_index` From b34858c67b6c0df1bbaaf9c9d44dd68000ebb273 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 22 Mar 2019 14:21:33 +0800 Subject: [PATCH 094/481] Refactor `get_justification_and_finalization_deltas` --- specs/core/0_beacon-chain.md | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2a7b0c776..b374b094f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -115,7 +115,7 @@ - [Helper functions](#helper-functions-1) - [Justification](#justification) - [Crosslinks](#crosslinks) - - [Eth1 data](#eth1-data-1) + - [Eth1 data](#eth1-data) - [Rewards and penalties](#rewards-and-penalties) - [Justification and finalization](#justification-and-finalization) - [Crosslinks](#crosslinks-1) @@ -128,7 +128,7 @@ - [Per-block processing](#per-block-processing) - [Block header](#block-header) - [RANDAO](#randao) - - [Eth1 data](#eth1-data) + - [Eth1 data](#eth1-data-1) - [Transactions](#transactions) - [Proposer slashings](#proposer-slashings) - [Attester slashings](#attester-slashings) @@ -1896,7 +1896,8 @@ Note: When applying penalties in the following balance recalculations implemente ```python def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - epochs_since_finality = get_current_epoch(state) + 1 - state.finalized_epoch + current_epoch = get_current_epoch(state) + epochs_since_finality = current_epoch + 1 - state.finalized_epoch rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] # Some helper variables @@ -1907,38 +1908,42 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ matching_head_attestations = get_previous_epoch_matching_head_attestations(state) matching_head_balance = get_attesting_balance(state, matching_head_attestations) eligible_validators = [ - i for i,v in enumerate(state.validator_registry) if is_active_validator(v, get_current_epoch(state)) or - (v.slashed and get_current_epoch(state) < v.withdrawable_epoch) + index for index, validator in enumerate(state.validator_registry) + if ( + is_active_validator(validator, current_epoch) or + (validator.slashed and current_epoch < validator.withdrawable_epoch) + ) ] # Process rewards or penalties for all validators for index in eligible_validators: + base_reward = get_base_reward(state, index) # Expected FFG source if index in get_attesting_indices(state, state.previous_epoch_attestations): - rewards[index] += get_base_reward(state, index) * total_attesting_balance // total_balance + rewards[index] += base_reward * total_attesting_balance // total_balance # Inclusion speed bonus rewards[index] += ( - get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // + base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_distance(state, index) ) else: - penalties[index] += get_base_reward(state, index) + penalties[index] += base_reward # Expected FFG target if index in get_attesting_indices(state, boundary_attestations): - rewards[index] += get_base_reward(state, index) * boundary_attesting_balance // total_balance + rewards[index] += base_reward * boundary_attesting_balance // total_balance else: penalties[index] += get_inactivity_penalty(state, index, 
epochs_since_finality) # Expected head if index in get_attesting_indices(state, matching_head_attestations): - rewards[index] += get_base_reward(state, index) * matching_head_balance // total_balance + rewards[index] += base_reward * matching_head_balance // total_balance else: - penalties[index] += get_base_reward(state, index) + penalties[index] += base_reward # Proposer bonus if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - rewards[proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + rewards[proposer_index] += base_reward // ATTESTATION_INCLUSION_REWARD_QUOTIENT # Take away max rewards if we're not finalizing if epochs_since_finality > 4: - penalties[index] += get_base_reward(state, index) * 4 + penalties[index] += base_reward * 4 return [rewards, penalties] ``` From e8257db32062a2a674bef8f1c4689d93ba5e0e26 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 22 Mar 2019 05:40:41 -0500 Subject: [PATCH 095/481] Removed hanging min --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b374b094f..a4719c702 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1886,7 +1886,7 @@ def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_sin if epochs_since_finality <= 4: extra_penalty = 0 else: - extra_penalty = get_effective_balance(state, index) * min(epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 + extra_penalty = get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 return get_base_reward(state, index) + extra_penalty ``` From 3ee9fc0cc775a05042f7acbfc46e03ec24d14104 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 22 Mar 2019 06:10:44 -0500 Subject: [PATCH 096/481] Merge attestation verification logic Also rename slashable attestation to standalone attestation to reflect its broader functionality in phase 1. --- specs/core/0_beacon-chain.md | 84 +++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c29aa113d..a4d5f5ec6 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -28,7 +28,7 @@ - [`Eth1DataVote`](#eth1datavote) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - - [`SlashableAttestation`](#slashableattestation) + - [`StandaloneAttestation`](#standaloneattestation) - [`DepositInput`](#depositinput) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) @@ -90,7 +90,8 @@ - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - - [`verify_slashable_attestation`](#verify_slashable_attestation) + - [`convert_to_standalone`](#convert_to_standalone) + - [`verify_standalone_attestation`](#verify_standalone_attestation) - [`is_double_vote`](#is_double_vote) - [`is_surround_vote`](#is_surround_vote) - [`integer_squareroot`](#integer_squareroot) @@ -187,7 +188,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_BALANCE_CHURN_QUOTIENT` | `2**5` (= 32) | -| `MAX_SLASHABLE_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | +| `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | @@ -369,7 +370,7 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `SlashableAttestation` +#### `StandaloneAttestation` ```python { @@ -489,10 +490,10 @@ The types are defined topologically to aid in facilitating an executable version ```python { - # First slashable attestation - 'slashable_attestation_1': SlashableAttestation, - # Second slashable attestation - 'slashable_attestation_2': SlashableAttestation, + # First attestation + 'attestation_1': StandaloneAttestation, + # Second attestation + 'attestation_2': StandaloneAttestation, } ``` @@ -1116,7 +1117,7 @@ def get_attestation_participants(state: BeaconState, aggregation_bit = get_bitfield_bit(bitfield, i) if aggregation_bit == 0b1: participants.append(validator_index) - return participants + return sorted(participants) ``` ### `is_power_of_two` @@ -1214,30 +1215,45 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: return True ``` -### `verify_slashable_attestation` +### `convert_to_standalone` ```python -def verify_slashable_attestation(state: BeaconState, slashable_attestation: SlashableAttestation) -> bool: +def convert_to_standalone(state: BeaconState, attestation: Attestation): """ - Verify validity of ``slashable_attestation`` fields. + Converts an attestation to (almost) standalone-verifiable form """ - if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + return StandaloneAttestation( + validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), + data=attestation.data, + custody_bitfield=attestation.custody_bitfield, + aggregate_signature=attestation.aggregate_signature + ) +``` + +### `verify_standalone_attestation` + +```python +def verify_standalone_attestation(state: BeaconState, standalone_attestation: StandaloneAttestation) -> bool: + """ + Verify validity of ``standalone_attestation`` fields. 
+ """ + if standalone_attestation.custody_bitfield != b'\x00' * len(standalone_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] return False - if not (1 <= len(slashable_attestation.validator_indices) <= MAX_SLASHABLE_ATTESTATION_PARTICIPANTS): + if not (1 <= len(standalone_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - for i in range(len(slashable_attestation.validator_indices) - 1): - if slashable_attestation.validator_indices[i] >= slashable_attestation.validator_indices[i + 1]: + for i in range(len(standalone_attestation.validator_indices) - 1): + if standalone_attestation.validator_indices[i] >= standalone_attestation.validator_indices[i + 1]: return False - if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)): + if not verify_bitfield(standalone_attestation.custody_bitfield, len(standalone_attestation.validator_indices)): return False custody_bit_0_indices = [] custody_bit_1_indices = [] - for i, validator_index in enumerate(slashable_attestation.validator_indices): - if get_bitfield_bit(slashable_attestation.custody_bitfield, i) == 0b0: + for i, validator_index in enumerate(standalone_attestation.validator_indices): + if get_bitfield_bit(standalone_attestation.custody_bitfield, i) == 0b0: custody_bit_0_indices.append(validator_index) else: custody_bit_1_indices.append(validator_index) @@ -1248,11 +1264,11 @@ def verify_slashable_attestation(state: BeaconState, slashable_attestation: Slas bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), ], message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b1)), + hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b0)), + hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b1)), ], - signature=slashable_attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(slashable_attestation.data.slot), DOMAIN_ATTESTATION), + signature=standalone_attestation.aggregate_signature, + domain=get_domain(state.fork, slot_to_epoch(standalone_attestation.data.slot), DOMAIN_ATTESTATION), ) ``` @@ -2408,16 +2424,16 @@ def process_attester_slashing(state: BeaconState, Process ``AttesterSlashing`` transaction. Note that this function mutates ``state``. 
""" - attestation1 = attester_slashing.slashable_attestation_1 - attestation2 = attester_slashing.slashable_attestation_2 + attestation1 = attester_slashing.attestation_1 + attestation2 = attester_slashing.attestation_2 # Check that the attestations are conflicting assert attestation1.data != attestation2.data assert ( is_double_vote(attestation1.data, attestation2.data) or is_surround_vote(attestation1.data, attestation2.data) ) - assert verify_slashable_attestation(state, attestation1) - assert verify_slashable_attestation(state, attestation2) + assert verify_standalone_attestation(state, attestation1) + assert verify_standalone_attestation(state, attestation2) slashable_indices = [ index for index in attestation1.validator_indices if ( @@ -2462,18 +2478,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ), } - # Check custody bits [to be generalised in phase 1] - assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) - - # Check aggregate signature [to be generalised in phase 1] - participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - assert len(participants) != 0 - assert bls_verify( - pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), - message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), - signature=attestation.aggregate_signature, - domain=get_domain(state.fork, target_epoch, DOMAIN_ATTESTATION), - ) + # Check signature and bitfields + assert verify_standalone_attestation(state, convert_to_standalone(state, attestation)) # Cache pending attestation pending_attestation = PendingAttestation( From ce18bde5c9cb81a85105bbd6f93980f29dbe714b Mon Sep 17 00:00:00 2001 From: vbuterin Date: Fri, 22 Mar 2019 06:20:38 -0500 Subject: [PATCH 097/481] Simplified sorted index check --- specs/core/0_beacon-chain.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a4d5f5ec6..94784e625 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1243,10 +1243,9 @@ def verify_standalone_attestation(state: BeaconState, standalone_attestation: St if not (1 <= len(standalone_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - for i in range(len(standalone_attestation.validator_indices) - 1): - if standalone_attestation.validator_indices[i] >= standalone_attestation.validator_indices[i + 1]: - return False - + if standalone_attestation.validator_indices != sorted(standalone_attestation.validator_indices): + return False + if not verify_bitfield(standalone_attestation.custody_bitfield, len(standalone_attestation.validator_indices)): return False From 80e2553afd675f508a42b42a44a224b97fe2b6f1 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 22 Mar 2019 09:32:21 -0400 Subject: [PATCH 098/481] Update specs/core/0_beacon-chain.md Co-Authored-By: vbuterin --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 94784e625..3ae2c7e13 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1220,7 +1220,7 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: ```python def convert_to_standalone(state: BeaconState, attestation: Attestation): """ - Converts an attestation to (almost) standalone-verifiable form + Convert an attestation to 
(almost) standalone-verifiable form """ return StandaloneAttestation( validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), From 5b40baa69eaac7151a6c90b9ce292cef827339b5 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 23 Mar 2019 11:58:20 +0800 Subject: [PATCH 099/481] Adjust the sanity test for attestation verification integration --- tests/phase0/test_sanity.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 444075a13..f7670c126 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -227,22 +227,18 @@ def test_attestation(state, pubkeys, privkeys): crosslink_committees = get_crosslink_committees_at_slot(state, slot) crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] - committee_size = len(crosslink_committee) - bitfield_length = (committee_size + 7) // 8 - aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) - custody_bitfield = b'\x00' * bitfield_length + # Select the first validator to be the attester + participants = [crosslink_committee[0]] + aggregation_bitfield_length = (len(crosslink_committee) + 7) // 8 + custody_bitfield_length = (len(participants) + 7) // 8 + aggregation_bitfield = b'\x01' + b'\x00' * (aggregation_bitfield_length - 1) + custody_bitfield = b'\x00' * custody_bitfield_length attestation = Attestation( aggregation_bitfield=aggregation_bitfield, data=attestation_data, custody_bitfield=custody_bitfield, aggregate_signature=EMPTY_SIGNATURE, ) - participants = get_attestation_participants( - test_state, - attestation.data, - attestation.aggregation_bitfield, - ) - assert len(participants) == 1 validator_index = participants[0] privkey = privkeys[validator_index] From b7441e8ab78560b3b48b3e1bd10de0aef6172080 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 25 Mar 2019 14:30:59 +0000 Subject: [PATCH 100/481] Generalise `slash_validator` for phase 1 Make `slash_validator` friendly to phase 1. This is a cosmetic change in the context of phase 0. --- specs/core/0_beacon-chain.md | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c29aa113d..2eeee7802 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -251,7 +251,8 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | Name | Value | | - | - | | `BASE_REWARD_QUOTIENT` | `2**5` (= 32) | -| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) | +| `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) | +| `PROPOSER_REWARD_QUOTIENT` | `2**4` (= 16) | | `ATTESTATION_INCLUSION_REWARD_QUOTIENT` | `2**3` (= 8) | | `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) | | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | @@ -1448,21 +1449,25 @@ def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: #### `slash_validator` ```python -def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: +def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whitleblower_index: ValidatorIndex=None) -> None: """ - Slash the validator with index ``index``. + Slash the validator with index ``slashed_index``. Note that this function mutates ``state``. 
""" - validator = state.validator_registry[index] - exit_validator(state, index) - state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) + exit_validator(state, slashed_index) + state.validator_registry[slashed_index].slashed = True + state.validator_registry[slashed_index].withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH + slashed_balance = get_effective_balance(state, slashed_index) + state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance - whistleblower_index = get_beacon_proposer_index(state, state.slot) - whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT - increase_balance(state, whistleblower_index, whistleblower_reward) - decrease_balance(state, index, whistleblower_reward) - validator.slashed = True - validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH + proposer_index = get_beacon_proposer_index(state, state.slot) + if whileblower_index is None: + whileblower_index = proposer_index + whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT + proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT + increase_balance(state, proposer_index, proposer_reward) + increase_balance(state, whitleblower_index, whistleblowing_reward - proposer_reward) + decrease_balance(state, slashed_index, whistleblower_reward) ``` #### `prepare_validator_for_withdrawal` From fb837400b2b2f5c14fd25d7e875fb1a236b83f64 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 25 Mar 2019 14:49:35 +0000 Subject: [PATCH 101/481] Can't spell (thanks continuous integration!) --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2eeee7802..ff142e048 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1449,7 +1449,7 @@ def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: #### `slash_validator` ```python -def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whitleblower_index: ValidatorIndex=None) -> None: +def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex=None) -> None: """ Slash the validator with index ``slashed_index``. Note that this function mutates ``state``. 
@@ -1461,12 +1461,12 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whitleblo state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance proposer_index = get_beacon_proposer_index(state, state.slot) - if whileblower_index is None: - whileblower_index = proposer_index + if whistleblower_index is None: + whistleblower_index = proposer_index whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT increase_balance(state, proposer_index, proposer_reward) - increase_balance(state, whitleblower_index, whistleblowing_reward - proposer_reward) + increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward) decrease_balance(state, slashed_index, whistleblower_reward) ``` From acc5f314ac601b3887722a9e6a9783dfd075ebfb Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 25 Mar 2019 14:54:43 +0000 Subject: [PATCH 102/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ff142e048..00c229036 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1467,7 +1467,7 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT increase_balance(state, proposer_index, proposer_reward) increase_balance(state, whistleblower_index, whistleblowing_reward - proposer_reward) - decrease_balance(state, slashed_index, whistleblower_reward) + decrease_balance(state, slashed_index, whistleblowing_reward) ``` #### `prepare_validator_for_withdrawal` From fceebeec4e518886b8c2986baee019d2585d8132 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 25 Mar 2019 11:25:33 -0600 Subject: [PATCH 103/481] backport 839 into dev --- .gitignore | 1 + specs/core/0_beacon-chain.md | 11 ++-- tests/phase0/__init__.py | 0 .../block_processing/test_process_deposit.py | 17 ++++-- .../block_processing/test_voluntary_exit.py | 21 ++++---- tests/phase0/conftest.py | 18 ------- tests/phase0/helpers.py | 16 +++--- tests/phase0/test_sanity.py | 18 ++++--- utils/phase0/jsonize.py | 52 +++++++++++++++++++ 9 files changed, 101 insertions(+), 53 deletions(-) create mode 100644 tests/phase0/__init__.py create mode 100644 utils/phase0/jsonize.py diff --git a/.gitignore b/.gitignore index dfb38d170..f33dd5256 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ /.pytest_cache build/ +output/ diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c29aa113d..e7e540a26 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -692,7 +692,8 @@ def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: previous_block_root=block.previous_block_root, state_root=ZERO_HASH, block_body_root=hash_tree_root(block.body), - signature=block.signature, + # signed_root(block) is used for block id purposes so signature is a stub + signature=EMPTY_SIGNATURE, ) ``` @@ -1749,7 +1750,7 @@ def cache_state(state: BeaconState) -> None: state.latest_block_header.state_root = previous_slot_state_root # store latest known block for previous slot - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = hash_tree_root(state.latest_block_header) + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signed_root(state.latest_block_header) ``` ### Per-epoch processing @@ 
-2198,8 +2199,8 @@ def update_registry_and_shuffling_data(state: BeaconState) -> None: state.current_shuffling_epoch = next_epoch state.current_shuffling_start_shard = ( state.current_shuffling_start_shard + - get_current_epoch_committee_count(state) % SHARD_COUNT - ) + get_current_epoch_committee_count(state) + ) % SHARD_COUNT state.current_shuffling_seed = generate_seed(state, state.current_shuffling_epoch) else: # If processing at least one crosslink keeps failing, then reshuffle every power of two, @@ -2315,7 +2316,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the slots match assert block.slot == state.slot # Verify that the parent matches - assert block.previous_block_root == hash_tree_root(state.latest_block_header) + assert block.previous_block_root == signed_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) # Verify proposer is not slashed diff --git a/tests/phase0/__init__.py b/tests/phase0/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py index 9f1b6add6..0726dddef 100644 --- a/tests/phase0/block_processing/test_process_deposit.py +++ b/tests/phase0/block_processing/test_process_deposit.py @@ -4,12 +4,14 @@ import pytest import build.phase0.spec as spec from build.phase0.spec import ( - Deposit, get_balance, + ZERO_HASH, process_deposit, ) from tests.phase0.helpers import ( build_deposit, + privkeys, + pubkeys, ) @@ -17,8 +19,10 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.voluntary_exits -def test_success(state, deposit_data_leaves, pubkeys, privkeys): +def test_success(state): pre_state = deepcopy(state) + # fill previous deposits with zero-hash + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(deposit_data_leaves) pubkey = pubkeys[index] @@ -47,8 +51,9 @@ def test_success(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, deposit, post_state -def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): +def test_success_top_up(state): pre_state = deepcopy(state) + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) validator_index = 0 amount = spec.MAX_DEPOSIT_AMOUNT // 4 @@ -78,8 +83,9 @@ def test_success_top_up(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, deposit, post_state -def test_wrong_index(state, deposit_data_leaves, pubkeys, privkeys): +def test_wrong_index(state): pre_state = deepcopy(state) + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(deposit_data_leaves) pubkey = pubkeys[index] @@ -106,8 +112,9 @@ def test_wrong_index(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, deposit, None -def test_bad_merkle_proof(state, deposit_data_leaves, pubkeys, privkeys): +def test_bad_merkle_proof(state): pre_state = deepcopy(state) + deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(deposit_data_leaves) pubkey = pubkeys[index] diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index 0801e4292..6adc81464 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -10,6 +10,7 @@ from build.phase0.spec import ( ) from tests.phase0.helpers import ( build_voluntary_exit, + 
pubkey_to_privkey, ) @@ -17,7 +18,7 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.voluntary_exits -def test_success(state, pub_to_priv): +def test_success(state): pre_state = deepcopy(state) # # setup pre_state @@ -30,7 +31,7 @@ def test_success(state, pub_to_priv): # current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( pre_state, @@ -52,11 +53,11 @@ def test_success(state, pub_to_priv): return pre_state, voluntary_exit, post_state -def test_validator_not_active(state, pub_to_priv): +def test_validator_not_active(state): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # # setup pre_state @@ -79,7 +80,7 @@ def test_validator_not_active(state, pub_to_priv): return pre_state, voluntary_exit, None -def test_validator_already_exited(state, pub_to_priv): +def test_validator_already_exited(state): pre_state = deepcopy(state) # # setup pre_state @@ -89,7 +90,7 @@ def test_validator_already_exited(state, pub_to_priv): current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # but validator already has exited pre_state.validator_registry[validator_index].exit_epoch = current_epoch + 2 @@ -110,7 +111,7 @@ def test_validator_already_exited(state, pub_to_priv): return pre_state, voluntary_exit, None -def test_validator_already_initiated_exit(state, pub_to_priv): +def test_validator_already_initiated_exit(state): pre_state = deepcopy(state) # # setup pre_state @@ -120,7 +121,7 @@ def test_validator_already_initiated_exit(state, pub_to_priv): current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # but validator already has initiated exit pre_state.validator_registry[validator_index].initiated_exit = True @@ -141,14 +142,14 @@ def test_validator_already_initiated_exit(state, pub_to_priv): return pre_state, voluntary_exit, None -def test_validator_not_active_long_enough(state, pub_to_priv): +def test_validator_not_active_long_enough(state): pre_state = deepcopy(state) # # setup pre_state # current_epoch = get_current_epoch(pre_state) validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pub_to_priv[pre_state.validator_registry[validator_index].pubkey] + privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] # but validator already has initiated exit pre_state.validator_registry[validator_index].initiated_exit = True diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index 395929028..36a087941 100644 --- a/tests/phase0/conftest.py +++ 
b/tests/phase0/conftest.py @@ -3,9 +3,6 @@ import pytest from build.phase0 import spec from tests.phase0.helpers import ( - privkeys_list, - pubkeys_list, - pubkey_to_privkey, create_genesis_state, ) @@ -25,21 +22,6 @@ MINIMAL_CONFIG = { } -@pytest.fixture -def privkeys(): - return privkeys_list - - -@pytest.fixture -def pubkeys(): - return pubkeys_list - - -@pytest.fixture -def pub_to_priv(): - return pubkey_to_privkey - - def overwrite_spec_config(config): for field in config: setattr(spec, field, config[field]) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 3987289bf..a0ede04e5 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -35,18 +35,20 @@ from build.phase0.utils.merkle_minimal import ( ) -privkeys_list = [i + 1 for i in range(1000)] -pubkeys_list = [bls.privtopub(privkey) for privkey in privkeys_list] -pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys_list, pubkeys_list)} +privkeys = [i + 1 for i in range(1000)] +pubkeys = [bls.privtopub(privkey) for privkey in privkeys] +pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} -def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): +def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): + if not deposit_data_leaves: + deposit_data_leaves = [] deposit_timestamp = 0 proof_of_possession = b'\x33' * 96 deposit_data_list = [] for i in range(num_validators): - pubkey = pubkeys_list[i] + pubkey = pubkeys[i] deposit_data = DepositData( amount=spec.MAX_DEPOSIT_AMOUNT, timestamp=deposit_timestamp, @@ -75,7 +77,7 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves): return genesis_validator_deposits, root -def create_genesis_state(num_validators, deposit_data_leaves): +def create_genesis_state(num_validators, deposit_data_leaves=None): initial_deposits, deposit_root = create_mock_genesis_validator_deposits( num_validators, deposit_data_leaves, @@ -105,7 +107,7 @@ def build_empty_block_for_next_slot(state): previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == spec.ZERO_HASH: previous_block_header.state_root = state.hash_tree_root() - empty_block.previous_block_root = previous_block_header.hash_tree_root() + empty_block.previous_block_root = signed_root(previous_block_header) return empty_block diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 444075a13..0b195fe96 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -47,6 +47,8 @@ from tests.phase0.helpers import ( build_empty_block_for_next_slot, force_registry_change_at_next_epoch, get_valid_proposer_slashing, + privkeys, + pubkeys, ) @@ -116,7 +118,7 @@ def test_empty_epoch_transition_not_finalizing(state): return state, [block], test_state -def test_proposer_slashing(state, pubkeys, privkeys): +def test_proposer_slashing(state): test_state = deepcopy(state) proposer_slashing = get_valid_proposer_slashing(state) validator_index = proposer_slashing.proposer_index @@ -142,9 +144,9 @@ def test_proposer_slashing(state, pubkeys, privkeys): return state, [block], test_state -def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): +def test_deposit_in_block(state): pre_state = deepcopy(state) - test_deposit_data_leaves = deepcopy(deposit_data_leaves) + test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) index = len(test_deposit_data_leaves) pubkey = pubkeys[index] @@ -179,9 +181,9 @@ 
def test_deposit_in_block(state, deposit_data_leaves, pubkeys, privkeys): return pre_state, [block], post_state -def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): +def test_deposit_top_up(state): pre_state = deepcopy(state) - test_deposit_data_leaves = deepcopy(deposit_data_leaves) + test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) validator_index = 0 amount = spec.MAX_DEPOSIT_AMOUNT // 4 @@ -218,7 +220,7 @@ def test_deposit_top_up(state, pubkeys, privkeys, deposit_data_leaves): return pre_state, [block], post_state -def test_attestation(state, pubkeys, privkeys): +def test_attestation(state): test_state = deepcopy(state) slot = state.slot shard = state.current_shuffling_start_shard @@ -287,7 +289,7 @@ def test_attestation(state, pubkeys, privkeys): return state, [attestation_block, epoch_block], test_state -def test_voluntary_exit(state, pubkeys, privkeys): +def test_voluntary_exit(state): pre_state = deepcopy(state) validator_index = get_active_validator_indices( pre_state.validator_registry, @@ -375,7 +377,7 @@ def test_no_exit_churn_too_long_since_change(state): return pre_state, [block], post_state -def test_transfer(state, pubkeys, privkeys): +def test_transfer(state): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) sender_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1] diff --git a/utils/phase0/jsonize.py b/utils/phase0/jsonize.py new file mode 100644 index 000000000..816192ec6 --- /dev/null +++ b/utils/phase0/jsonize.py @@ -0,0 +1,52 @@ +from .minimal_ssz import hash_tree_root + + +def jsonize(value, typ, include_hash_tree_roots=False): + if isinstance(typ, str) and typ[:4] == 'uint': + return value + elif typ == 'bool': + assert value in (True, False) + return value + elif isinstance(typ, list): + return [jsonize(element, typ[0], include_hash_tree_roots) for element in value] + elif isinstance(typ, str) and typ[:4] == 'byte': + return '0x' + value.hex() + elif hasattr(typ, 'fields'): + ret = {} + for field, subtype in typ.fields.items(): + ret[field] = jsonize(getattr(value, field), subtype, include_hash_tree_roots) + if include_hash_tree_roots: + ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() + if include_hash_tree_roots: + ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() + return ret + else: + print(value, typ) + raise Exception("Type not recognized") + + +def dejsonize(json, typ): + if isinstance(typ, str) and typ[:4] == 'uint': + return json + elif typ == 'bool': + assert json in (True, False) + return json + elif isinstance(typ, list): + return [dejsonize(element, typ[0]) for element in json] + elif isinstance(typ, str) and typ[:4] == 'byte': + return bytes.fromhex(json[2:]) + elif hasattr(typ, 'fields'): + temp = {} + for field, subtype in typ.fields.items(): + temp[field] = dejsonize(json[field], subtype) + if field + "_hash_tree_root" in json: + assert(json[field + "_hash_tree_root"][2:] == + hash_tree_root(temp[field], subtype).hex()) + ret = typ(**temp) + if "hash_tree_root" in json: + assert(json["hash_tree_root"][2:] == + hash_tree_root(ret, typ).hex()) + return ret + else: + print(json, typ) + raise Exception("Type not recognized") From 6cc82278b4a1208bc2da94a37f398eb12c96e4e1 Mon Sep 17 00:00:00 2001 From: Matthew Slipper Date: Mon, 25 Mar 2019 13:27:18 -0700 Subject: [PATCH 104/481] Update rpc-interface.md --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index 51dc3a900..fa49bcd75 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -103,7 +103,7 @@ Since some clients are waiting for `libp2p` implementations in their respective ``` ( network_id: uint8 - chain_id: uint8 + chain_id: uint64 latest_finalized_root: bytes32 latest_finalized_epoch: uint64 best_root: bytes32 From 0121adea3831ac527fcb4c8a63a04bd63ac8684f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 07:09:48 -0600 Subject: [PATCH 105/481] fix beacon proposer function and mod v-guide to not have lookahead for proposing --- specs/core/0_beacon-chain.md | 20 ++++++++-------- specs/validator/0_beacon-chain-validator.md | 26 +++++++++++++++------ 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 485233746..c96c28888 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1061,23 +1061,23 @@ def generate_seed(state: BeaconState, ```python def get_beacon_proposer_index(state: BeaconState, - slot: Slot, - registry_change: bool=False) -> ValidatorIndex: + slot: Slot) -> ValidatorIndex: """ Return the beacon proposer index for the ``slot``. + Due to proposer selection being based upon the validator balances during + the epoch in question, this can only be run for the current epoch. """ - epoch = slot_to_epoch(slot) current_epoch = get_current_epoch(state) - previous_epoch = get_previous_epoch(state) - next_epoch = current_epoch + 1 + assert slot_to_epoch(slot) == current_epoch - assert previous_epoch <= epoch <= next_epoch - - first_committee, _ = get_crosslink_committees_at_slot(state, slot, registry_change)[0] + first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0] i = 0 while True: - rand_byte = hash(generate_seed(get_current_epoch(state)) + int_to_bytes8(i // 32))[i % 32] - candidate = first_committee[(epoch + i) % len(first_committee)] + rand_byte = hash( + generate_seed(state, current_epoch) + + int_to_bytes8(i // 32) + )[i % 32] + candidate = first_committee[(current_epoch + i) % len(first_committee)] if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: return candidate i += 1 diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 62a7011b4..f1a10a048 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -338,15 +338,13 @@ def get_committee_assignment( state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex, - registry_change: bool=False) -> Tuple[List[ValidatorIndex], Shard, Slot, bool]: + registry_change: bool=False) -> Tuple[List[ValidatorIndex], Shard, Slot]: """ Return the committee assignment in the ``epoch`` for ``validator_index`` and ``registry_change``. ``assignment`` returned is a tuple of the following form: * ``assignment[0]`` is the list of validators in the committee * ``assignment[1]`` is the shard to which the committee is assigned * ``assignment[2]`` is the slot at which the committee is assigned - * ``assignment[3]`` is a bool signalling if the validator is expected to propose - a beacon block at the assigned slot. 
""" previous_epoch = get_previous_epoch(state) next_epoch = get_current_epoch(state) + 1 @@ -367,15 +365,29 @@ def get_committee_assignment( if len(selected_committees) > 0: validators = selected_committees[0][0] shard = selected_committees[0][1] - is_proposer = validator_index == get_beacon_proposer_index(state, slot, registry_change=registry_change) - assignment = (validators, shard, slot, is_proposer) + assignment = (validators, shard, slot) return assignment ``` +A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the epoch of the slot in question and can not reliably be used to predict an epoch in advance. + +```python +def is_proposer_at_slot(state: BeaconState, + slot: Slot, + validator_index: ValidatorIndex) -> bool: + current_epoch = get_current_epoch(state) + assert slot_to_epoch(slot) == current_epoch + + return get_beacon_proposer_index(state, slot) == validator_index +``` + +_Note_: If a validator is assigned to the 0th slot of an epoch, the validator must run an empty slot transition from the previous epoch into the 0th slot of the epoch to be able to check if they are a proposer at that slot. + + ### Lookahead -The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming assignments of proposing and attesting dictated by the shuffling and slot. +The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the epoch in question. There are three possibilities for the shuffling at the next epoch: 1. The shuffling changes due to a "validator registry change". @@ -386,7 +398,7 @@ Either (2) or (3) occurs if (1) fails. The choice between (2) and (3) is determi When querying for assignments in the next epoch there are two options -- with and without a `registry_change` -- which is the optional fourth parameter of the `get_committee_assignment`. -`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should always plan for assignments from both values of `registry_change` unless the validator can concretely eliminate one of the options. Planning for future assignments involves noting at which future slot one might have to attest and propose and also which shard one should begin syncing (in phase 1+). +`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should always plan for assignments from both values of `registry_change` unless the validator can concretely eliminate one of the options. Planning for future assignments involves noting at which future slot one might have to attest and also which shard one should begin syncing (in phase 1+). Specifically, a validator should call both `get_committee_assignment(state, next_epoch, validator_index, registry_change=True)` and `get_committee_assignment(state, next_epoch, validator_index, registry_change=False)` when checking for next epoch assignments. 
From be57cafbfbd70368a133a4bd1bc274b306f2af0e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 07:17:08 -0600 Subject: [PATCH 106/481] switch utils hash-function to sha256 --- utils/phase0/hash_function.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/utils/phase0/hash_function.py b/utils/phase0/hash_function.py index 21e6555bf..3fee63d82 100644 --- a/utils/phase0/hash_function.py +++ b/utils/phase0/hash_function.py @@ -1,7 +1,6 @@ -# from hashlib import sha256 -from eth_utils import keccak +from hashlib import sha256 +# from eth_utils import keccak -# def hash(x): return sha256(x).digest() -def hash(x): - return keccak(x) +def hash(x): return sha256(x).digest() +# def hash(x): return keccak(x) From 87d2618a495ad382d6810a4fe4b96d4d91f9355f Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 26 Mar 2019 13:21:49 +0000 Subject: [PATCH 107/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 00c229036..61ebe5e83 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -252,8 +252,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | - | - | | `BASE_REWARD_QUOTIENT` | `2**5` (= 32) | | `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) | -| `PROPOSER_REWARD_QUOTIENT` | `2**4` (= 16) | -| `ATTESTATION_INCLUSION_REWARD_QUOTIENT` | `2**3` (= 8) | +| `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) | | `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) | | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | @@ -2016,7 +2015,7 @@ def compute_normal_justification_and_finalization_deltas(state: BeaconState) -> # Proposer bonus if index in get_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - deltas[0][proposer_index] += get_base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT + deltas[0][proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT return deltas ``` From 5a708bae348221bfe1926775b31a4de05b23a090 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 07:38:51 -0600 Subject: [PATCH 108/481] fix tests --- tests/phase0/helpers.py | 29 ++++++++++------------------- tests/phase0/test_sanity.py | 4 ++-- 2 files changed, 12 insertions(+), 21 deletions(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index a0ede04e5..3c68c2c8c 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -12,7 +12,6 @@ from build.phase0.spec import ( AttestationData, BeaconBlockHeader, Deposit, - DepositInput, DepositData, Eth1Data, ProposerSlashing, @@ -43,21 +42,17 @@ pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkey def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): if not deposit_data_leaves: deposit_data_leaves = [] - deposit_timestamp = 0 proof_of_possession = b'\x33' * 96 deposit_data_list = [] for i in range(num_validators): pubkey = pubkeys[i] deposit_data = DepositData( + pubkey=pubkey, + # insecurely use pubkey as withdrawal key as well + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], amount=spec.MAX_DEPOSIT_AMOUNT, - timestamp=deposit_timestamp, - deposit_input=DepositInput( - pubkey=pubkey, - # insecurely use pubkey as withdrawal key as well - withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], - 
proof_of_possession=proof_of_possession, - ), + proof_of_possession=proof_of_possession, ) item = hash(deposit_data.serialize()) deposit_data_leaves.append(item) @@ -72,7 +67,7 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=N genesis_validator_deposits.append(Deposit( proof=list(get_merkle_proof(tree, item_index=i)), index=i, - deposit_data=deposit_data_list[i] + data=deposit_data_list[i] )) return genesis_validator_deposits, root @@ -112,14 +107,15 @@ def build_empty_block_for_next_slot(state): def build_deposit_data(state, pubkey, privkey, amount): - deposit_input = DepositInput( + deposit_data = DepositData( pubkey=pubkey, # insecurely use pubkey as withdrawal key as well withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], + amount=amount, proof_of_possession=EMPTY_SIGNATURE, ) proof_of_possession = bls.sign( - message_hash=signed_root(deposit_input), + message_hash=signed_root(deposit_data), privkey=privkey, domain=get_domain( state.fork, @@ -127,12 +123,7 @@ def build_deposit_data(state, pubkey, privkey, amount): spec.DOMAIN_DEPOSIT, ) ) - deposit_input.proof_of_possession = proof_of_possession - deposit_data = DepositData( - amount=amount, - timestamp=0, - deposit_input=deposit_input, - ) + deposit_data.proof_of_possession = proof_of_possession return deposit_data @@ -201,7 +192,7 @@ def build_deposit(state, deposit = Deposit( proof=list(proof), index=index, - deposit_data=deposit_data, + data=deposit_data, ) return deposit, root, deposit_data_leaves diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 0b195fe96..19e75f672 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -163,7 +163,7 @@ def test_deposit_in_block(state): deposit = Deposit( proof=list(proof), index=index, - deposit_data=deposit_data, + data=deposit_data, ) pre_state.latest_eth1_data.deposit_root = root @@ -202,7 +202,7 @@ def test_deposit_top_up(state): deposit = Deposit( proof=list(proof), index=merkle_index, - deposit_data=deposit_data, + data=deposit_data, ) pre_state.latest_eth1_data.deposit_root = root From a8410b8b843608bbbcec9c4cad76898f7435ae07 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 11:27:07 -0600 Subject: [PATCH 109/481] add some attestation tests. 
fix genesi crosslink bug --- specs/core/0_beacon-chain.md | 11 ++- .../test_process_attestation.py | 67 +++++++++++++++++++ tests/phase0/helpers.py | 50 ++++++++++++++ tests/phase0/test_sanity.py | 50 +------------- 4 files changed, 124 insertions(+), 54 deletions(-) create mode 100644 tests/phase0/block_processing/test_process_attestation.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e628c7057..4b52bd2fa 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -905,21 +905,20 @@ def get_crosslink_committees_at_slot(state: BeaconState, next_epoch = current_epoch + 1 assert previous_epoch <= epoch <= next_epoch - active_validator_indices = get_active_validator_indices( + indices = get_active_validator_indices( state.validator_registry, epoch, ) - committees_per_epoch = get_epoch_committee_count(len(active_validator_indices)) + committees_per_epoch = get_epoch_committee_count(len(indices)) if epoch == current_epoch: start_shard = state.latest_start_shard elif epoch == previous_epoch: - start_shard = (state.latest_start_shard - SLOTS_PER_EPOCH * committees_per_epoch) % SHARD_COUNT + start_shard = (state.latest_start_shard - committees_per_epoch) % SHARD_COUNT elif epoch == next_epoch: current_epoch_committees = get_current_epoch_committee_count(state) - start_shard = (state.latest_start_shard + EPOCH_LENGTH * current_epoch_committees) % SHARD_COUNT + start_shard = (state.latest_start_shard + current_epoch_committees) % SHARD_COUNT - indices = get_active_validator_indices(state.validator_registry, epoch) committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT @@ -1830,7 +1829,7 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: current_epoch = get_current_epoch(state) - previous_epoch = current_epoch - 1 + previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) next_epoch = current_epoch + 1 for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py new file mode 100644 index 000000000..80770fdf9 --- /dev/null +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -0,0 +1,67 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec + +from build.phase0.state_transition import ( + state_transition, +) +from build.phase0.spec import ( + ZERO_HASH, + get_current_epoch, + process_attestation, + slot_to_epoch, +) +from tests.phase0.helpers import ( + build_empty_block_for_next_slot, + get_valid_attestation, +) + + +# mark entire file as 'attestations' +pytestmark = pytest.mark.attestations + + +def run_attestation_processing(state, attestation, valid=True): + """ + Run ``process_attestation`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_attestation(post_state, attestation) + return state, None + + process_attestation(post_state, attestation) + + current_epoch = get_current_epoch(state) + target_epoch = slot_to_epoch(attestation.data.slot) + if target_epoch == current_epoch: + assert len(post_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1 + else: + assert len(post_state.previous_epoch_attestations) == len(state.previous_epoch_attestations) + 1 + + + return state, post_state + + +def test_success(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + pre_state, post_state = run_attestation_processing(state, attestation) + + return pre_state, attestation, post_state + + +def test_success_prevous_epoch(state): + attestation = get_valid_attestation(state) + block = build_empty_block_for_next_slot(state) + block.slot = state.slot + spec.SLOTS_PER_EPOCH + state_transition(state, block) + + pre_state, post_state = run_attestation_processing(state, attestation) + + return pre_state, attestation, post_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 3c68c2c8c..d7f4ae6e8 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -9,7 +9,9 @@ from build.phase0.spec import ( EMPTY_SIGNATURE, ZERO_HASH, # SSZ + Attestation, AttestationData, + AttestationDataAndCustodyBit, BeaconBlockHeader, Deposit, DepositData, @@ -18,7 +20,9 @@ from build.phase0.spec import ( VoluntaryExit, # functions get_active_validator_indices, + get_attestation_participants, get_block_root, + get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_empty_block, @@ -236,3 +240,49 @@ def get_valid_proposer_slashing(state): header_1=header_1, header_2=header_2, ) + + +def get_valid_attestation(state, slot=None): + if slot is None: + slot = state.slot + shard = state.latest_start_shard + attestation_data = build_attestation_data(state, slot, shard) + + crosslink_committees = get_crosslink_committees_at_slot(state, slot) + crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] + + committee_size = len(crosslink_committee) + bitfield_length = (committee_size + 7) // 8 + aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) + custody_bitfield = b'\x00' * bitfield_length + attestation = Attestation( + aggregation_bitfield=aggregation_bitfield, + data=attestation_data, + custody_bitfield=custody_bitfield, + aggregate_signature=EMPTY_SIGNATURE, + ) + participants = get_attestation_participants( + state, + attestation.data, + attestation.aggregation_bitfield, + ) + assert len(participants) == 1 + + validator_index = participants[0] + privkey = privkeys[validator_index] + + message_hash = AttestationDataAndCustodyBit( + data=attestation.data, + custody_bit=0b0, + ).hash_tree_root() + + attestation.aggregation_signature = bls.sign( + message_hash=message_hash, + privkey=privkey, + domain=get_domain( + fork=state.fork, + epoch=get_current_epoch(state), + domain_type=spec.DOMAIN_ATTESTATION, + ) + ) + return attestation diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index a2cbadd9a..b287bde51 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -11,19 +11,13 @@ from build.phase0.spec import ( EMPTY_SIGNATURE, ZERO_HASH, # SSZ - Attestation, - 
AttestationDataAndCustodyBit, - BeaconBlockHeader, Deposit, Transfer, - ProposerSlashing, VoluntaryExit, # functions get_active_validator_indices, - get_attestation_participants, get_balance, get_block_root, - get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_state_root, @@ -42,10 +36,10 @@ from build.phase0.utils.merkle_minimal import ( get_merkle_root, ) from tests.phase0.helpers import ( - build_attestation_data, build_deposit_data, build_empty_block_for_next_slot, force_registry_change_at_next_epoch, + get_valid_attestation, get_valid_proposer_slashing, privkeys, pubkeys, @@ -222,47 +216,7 @@ def test_deposit_top_up(state): def test_attestation(state): test_state = deepcopy(state) - slot = state.slot - shard = state.latest_start_shard - attestation_data = build_attestation_data(state, slot, shard) - - crosslink_committees = get_crosslink_committees_at_slot(state, slot) - crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] - - committee_size = len(crosslink_committee) - bitfield_length = (committee_size + 7) // 8 - aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) - custody_bitfield = b'\x00' * bitfield_length - attestation = Attestation( - aggregation_bitfield=aggregation_bitfield, - data=attestation_data, - custody_bitfield=custody_bitfield, - aggregate_signature=EMPTY_SIGNATURE, - ) - participants = get_attestation_participants( - test_state, - attestation.data, - attestation.aggregation_bitfield, - ) - assert len(participants) == 1 - - validator_index = participants[0] - privkey = privkeys[validator_index] - - message_hash = AttestationDataAndCustodyBit( - data=attestation.data, - custody_bit=0b0, - ).hash_tree_root() - - attestation.aggregation_signature = bls.sign( - message_hash=message_hash, - privkey=privkey, - domain=get_domain( - fork=test_state.fork, - epoch=get_current_epoch(test_state), - domain_type=spec.DOMAIN_ATTESTATION, - ) - ) + attestation = get_valid_attestation(state) # # Add to state via block transition From 9fa6055a8a5b2c73774f143d3abdbe23323e93b4 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 11:41:15 -0600 Subject: [PATCH 110/481] add more attestation tests --- .../test_process_attestation.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index 80770fdf9..b34c64d95 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -65,3 +65,90 @@ def test_success_prevous_epoch(state): pre_state, post_state = run_attestation_processing(state, attestation) return pre_state, attestation, post_state + + +def test_before_inclusion_delay(state): + attestation = get_valid_attestation(state) + # do not increment slot to allow for inclusion delay + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_after_epoch_slots(state): + attestation = get_valid_attestation(state) + block = build_empty_block_for_next_slot(state) + # increment past latest inclusion slot + block.slot = state.slot + spec.SLOTS_PER_EPOCH + 1 + state_transition(state, block) + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_bad_source_epoch(state): + attestation = get_valid_attestation(state) + 
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_epoch += 10 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_bad_source_root(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.source_root = b'\x42'*32 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_non_zero_crosslink_data_root(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.data.crosslink_data_root = b'\x42'*32 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_bad_previous_crosslink(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + state.latest_crosslinks[attestation.data.shard].epoch += 10 + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_non_empty_custody_bitfield(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.custody_bitfield = b'\x01' + attestation.custody_bitfield[1:] + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state + + +def test_empty_aggregation_bitfield(state): + attestation = get_valid_attestation(state) + state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + + attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield) + + pre_state, post_state = run_attestation_processing(state, attestation, False) + + return pre_state, attestation, post_state From b91dfd6244f7196827a311d879aa29ad7eb83f05 Mon Sep 17 00:00:00 2001 From: Vitalik Buterin Date: Tue, 26 Mar 2019 12:56:35 -0500 Subject: [PATCH 111/481] Add merkle_parial_from_paths --- specs/light_client/merkle_proofs.md | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 311a4aa5c..2e92488cb 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -158,26 +158,32 @@ def verify_multi_proof(root, indices, leaves, proof): We define: -#### `MerklePartialLeaf` - -```python -{ - "path": ["uint64"], - "value": "bytes32" -} -``` - #### `MerklePartial` ```python { "root": "bytes32", - "values": [MerklePartialLeaf], + "indices": ["uint64"], + "values": ["bytes32"], "proof": ["bytes32"] } ``` +#### `merkle_partial_from_paths` + +```python +def merkle_partial_from_paths(obj, paths): + indices = set() + for path in paths: + indices = indices.union(get_generalized_indices(obj, path)) + return MerklePartial( + root=hash_tree_root(obj), + indices=indices, + values= mk_multi_proof + ) +``` + #### Proofs for execution We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `MerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`. 
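A `MerklePartial` built from such paths is meant to be checked by its receiver against the claimed root. The following consumer-side sketch is not normative text; it assumes the `verify_multi_proof(root, indices, leaves, proof)` helper defined earlier in `merkle_proofs.md` returns a boolean, and the wrapper name `is_valid_merkle_partial` is hypothetical.

```python
def is_valid_merkle_partial(partial: MerklePartial) -> bool:
    # The supplied leaf values, placed at the claimed generalized indices,
    # must combine with the proof hashes to reproduce the claimed root
    return verify_multi_proof(
        root=partial.root,
        indices=partial.indices,
        leaves=partial.values,
        proof=partial.proof,
    )
```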
From 2c5a68b5b5d4348ede49f07e50b943eb22c03414 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 12:32:24 -0600 Subject: [PATCH 112/481] remove registry_change options from shuffling functions --- specs/core/0_beacon-chain.md | 8 ++----- specs/validator/0_beacon-chain-validator.md | 21 +++++-------------- .../test_process_attestation.py | 1 - 3 files changed, 7 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4b52bd2fa..38f5f56c5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -891,13 +891,9 @@ def get_current_epoch_committee_count(state: BeaconState) -> int: ```python def get_crosslink_committees_at_slot(state: BeaconState, - slot: Slot, - registry_change: bool=False) -> List[Tuple[List[ValidatorIndex], Shard]]: + slot: Slot) -> List[Tuple[List[ValidatorIndex], Shard]]: """ Return the list of ``(committee, shard)`` tuples for the ``slot``. - - Note: There are two possible shufflings for crosslink committees for a - ``slot`` in the next epoch -- with and without a `registry_change` """ epoch = slot_to_epoch(slot) current_epoch = get_current_epoch(state) @@ -2339,7 +2335,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Check target epoch, source epoch, and source root target_epoch = slot_to_epoch(attestation.data.slot) assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { - (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), } diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index f1a10a048..4a4c63836 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -331,16 +331,15 @@ signed_attestation_data = bls_sign( ## Validator assignments -A validator can get the current and previous epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= current_epoch`. +A validator can get the current, previous, and next epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= next_epoch`. ```python def get_committee_assignment( state: BeaconState, epoch: Epoch, - validator_index: ValidatorIndex, - registry_change: bool=False) -> Tuple[List[ValidatorIndex], Shard, Slot]: + validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]: """ - Return the committee assignment in the ``epoch`` for ``validator_index`` and ``registry_change``. + Return the committee assignment in the ``epoch`` for ``validator_index``. 
``assignment`` returned is a tuple of the following form: * ``assignment[0]`` is the list of validators in the committee * ``assignment[1]`` is the shard to which the committee is assigned @@ -355,7 +354,6 @@ def get_committee_assignment( crosslink_committees = get_crosslink_committees_at_slot( state, slot, - registry_change=registry_change, ) selected_committees = [ committee # Tuple[List[ValidatorIndex], Shard] @@ -389,18 +387,9 @@ _Note_: If a validator is assigned to the 0th slot of an epoch, the validator mu The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the epoch in question. -There are three possibilities for the shuffling at the next epoch: -1. The shuffling changes due to a "validator registry change". -2. The shuffling changes due to `epochs_since_last_registry_update` being an exact power of 2 greater than 1. -3. The shuffling remains the same (i.e. the validator is in the same shard committee). +`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in phase 1+). -Either (2) or (3) occurs if (1) fails. The choice between (2) and (3) is deterministic based upon `epochs_since_last_registry_update`. - -When querying for assignments in the next epoch there are two options -- with and without a `registry_change` -- which is the optional fourth parameter of the `get_committee_assignment`. - -`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should always plan for assignments from both values of `registry_change` unless the validator can concretely eliminate one of the options. Planning for future assignments involves noting at which future slot one might have to attest and also which shard one should begin syncing (in phase 1+). - -Specifically, a validator should call both `get_committee_assignment(state, next_epoch, validator_index, registry_change=True)` and `get_committee_assignment(state, next_epoch, validator_index, registry_change=False)` when checking for next epoch assignments. +Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments. 
## How to avoid slashing diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index b34c64d95..08cab11ff 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -43,7 +43,6 @@ def run_attestation_processing(state, attestation, valid=True): else: assert len(post_state.previous_epoch_attestations) == len(state.previous_epoch_attestations) + 1 - return state, post_state From a38e3525cd27559cca9599c7c9cf7199b81b558b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 13:18:18 -0600 Subject: [PATCH 113/481] ensure validator balances are losing when no finality --- tests/phase0/test_sanity.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index b287bde51..3b4497ca5 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -108,6 +108,8 @@ def test_empty_epoch_transition_not_finalizing(state): assert test_state.slot == block.slot assert test_state.finalized_epoch < get_current_epoch(test_state) - 4 + for index in range(len(test_state.validator_registry)): + assert get_balance(test_state, index) < get_balance(state, index) return state, [block], test_state From 63e7346cfbc4b000c28b981710f43b9ec48a284a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 26 Mar 2019 13:40:19 -0600 Subject: [PATCH 114/481] standaline -> indexed --- specs/core/0_beacon-chain.md | 52 ++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2acf7ddbe..2e2c3ad59 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -28,7 +28,7 @@ - [`Eth1DataVote`](#eth1datavote) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - - [`StandaloneAttestation`](#standaloneattestation) + - [`IndexedAttestation`](#indexedattestation) - [`DepositData`](#depositdata) - [`BeaconBlockHeader`](#beaconblockheader) - [`Validator`](#validator) @@ -86,8 +86,8 @@ - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - - [`convert_to_standalone`](#convert_to_standalone) - - [`verify_standalone_attestation`](#verify_standalone_attestation) + - [`convert_to_indexed`](#convert_to_indexed) + - [`verify_indexed_attestation`](#verify_indexed_attestation) - [`is_double_vote`](#is_double_vote) - [`is_surround_vote`](#is_surround_vote) - [`integer_squareroot`](#integer_squareroot) @@ -370,7 +370,7 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `StandaloneAttestation` +#### `IndexedAttestation` ```python { @@ -480,9 +480,9 @@ The types are defined topologically to aid in facilitating an executable version ```python { # First attestation - 'attestation_1': StandaloneAttestation, + 'attestation_1': IndexedAttestation, # Second attestation - 'attestation_2': StandaloneAttestation, + 'attestation_2': IndexedAttestation, } ``` @@ -1148,14 +1148,14 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: return True ``` -### `convert_to_standalone` +### `convert_to_indexed` ```python -def convert_to_standalone(state: BeaconState, attestation: Attestation): +def convert_to_indexed(state: BeaconState, attestation: Attestation): """ - Convert an attestation to (almost) standalone-verifiable form + Convert an 
attestation to (almost) indexed-verifiable form """ - return StandaloneAttestation( + return IndexedAttestation( validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), data=attestation.data, custody_bitfield=attestation.custody_bitfield, @@ -1163,29 +1163,29 @@ def convert_to_standalone(state: BeaconState, attestation: Attestation): ) ``` -### `verify_standalone_attestation` +### `verify_indexed_attestation` ```python -def verify_standalone_attestation(state: BeaconState, standalone_attestation: StandaloneAttestation) -> bool: +def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: """ - Verify validity of ``standalone_attestation`` fields. + Verify validity of ``indexed_attestation`` fields. """ - if standalone_attestation.custody_bitfield != b'\x00' * len(standalone_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + if indexed_attestation.custody_bitfield != b'\x00' * len(indexed_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] return False - if not (1 <= len(standalone_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): + if not (1 <= len(indexed_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - if standalone_attestation.validator_indices != sorted(standalone_attestation.validator_indices): + if indexed_attestation.validator_indices != sorted(indexed_attestation.validator_indices): return False - if not verify_bitfield(standalone_attestation.custody_bitfield, len(standalone_attestation.validator_indices)): + if not verify_bitfield(indexed_attestation.custody_bitfield, len(indexed_attestation.validator_indices)): return False custody_bit_0_indices = [] custody_bit_1_indices = [] - for i, validator_index in enumerate(standalone_attestation.validator_indices): - if get_bitfield_bit(standalone_attestation.custody_bitfield, i) == 0b0: + for i, validator_index in enumerate(indexed_attestation.validator_indices): + if get_bitfield_bit(indexed_attestation.custody_bitfield, i) == 0b0: custody_bit_0_indices.append(validator_index) else: custody_bit_1_indices.append(validator_index) @@ -1196,11 +1196,11 @@ def verify_standalone_attestation(state: BeaconState, standalone_attestation: St bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), ], message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=standalone_attestation.data, custody_bit=0b1)), + hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)), + hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)), ], - signature=standalone_attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(standalone_attestation.data.slot), DOMAIN_ATTESTATION), + signature=indexed_attestation.aggregate_signature, + domain=get_domain(state.fork, slot_to_epoch(indexed_attestation.data.slot), DOMAIN_ATTESTATION), ) ``` @@ -2318,8 +2318,8 @@ def process_attester_slashing(state: BeaconState, is_double_vote(attestation1.data, attestation2.data) or is_surround_vote(attestation1.data, attestation2.data) ) - assert verify_standalone_attestation(state, attestation1) - assert verify_standalone_attestation(state, attestation2) + assert verify_indexed_attestation(state, attestation1) + assert verify_indexed_attestation(state, attestation2) slashable_indices = [ index for 
index in attestation1.validator_indices if ( @@ -2366,7 +2366,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: } # Check signature and bitfields - assert verify_standalone_attestation(state, convert_to_standalone(state, attestation)) + assert verify_indexed_attestation(state, convert_to_indexed(state, attestation)) # Cache pending attestation pending_attestation = PendingAttestation( From 1b975d2ceb669f860b7d7c73f71ad68f939618dc Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 27 Mar 2019 19:23:23 +0600 Subject: [PATCH 115/481] Use signed_root as block id in Honest V guide --- specs/validator/0_beacon-chain-validator.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 4a4c63836..0d6033acd 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -152,7 +152,7 @@ _Note:_ there might be "skipped" slots between the `parent` and `block`. These s ##### Parent root -Set `block.previous_block_root = hash_tree_root(parent)`. +Set `block.previous_block_root = signed_root(parent)`. ##### State root @@ -255,11 +255,11 @@ Set `attestation_data.shard = shard` where `shard` is the shard associated with ##### Beacon block root -Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`. +Set `attestation_data.beacon_block_root = signed_root(head_block)`. ##### Target root -Set `attestation_data.target_root = hash_tree_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. +Set `attestation_data.target_root = signed_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. _Note:_ This can be looked up in the state using: * Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. 
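These `signed_root` references line up with the earlier changes in this series: `get_temporary_block_header` treats the signature as a stub because `signed_root(block)` is used for block id purposes, and `process_block_header` checks `block.previous_block_root` against `signed_root(state.latest_block_header)`. As an illustration, a parent-root helper mirroring `build_empty_block_for_next_slot` from the test helpers might look as follows; the name `compute_parent_root` is hypothetical and `deepcopy` is assumed to be imported.

```python
def compute_parent_root(state: BeaconState) -> Bytes32:
    # Work on a copy so the header cached in ``state`` is left untouched
    previous_block_header = deepcopy(state.latest_block_header)
    # The cached header may still carry a stub state root; fill it in before
    # taking the root, as build_empty_block_for_next_slot does
    if previous_block_header.state_root == ZERO_HASH:
        previous_block_header.state_root = state.hash_tree_root()
    # Identify the parent by its signed root, i.e. the root over all fields
    # except the signature
    return signed_root(previous_block_header)
```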
From fbb09795ed3dca6e98eb9ef97c572f4e590293cf Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 27 Mar 2019 08:31:56 -0600 Subject: [PATCH 116/481] fix convert_to_indexed custody bitfield bug --- specs/core/0_beacon-chain.md | 63 +++++++++++++++---- .../test_process_attestation.py | 2 +- tests/phase0/helpers.py | 36 +++++++---- 3 files changed, 78 insertions(+), 23 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2e2c3ad59..0bdfafb79 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -77,6 +77,7 @@ - [`generate_seed`](#generate_seed) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`verify_merkle_branch`](#verify_merkle_branch) + - [`get_crosslink_committee_for_attestation`](#get_crosslink_committee_for_attestation) - [`get_attestation_participants`](#get_attestation_participants) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) @@ -85,6 +86,7 @@ - [`get_fork_version`](#get_fork_version) - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) + - [`set_bitfield_bit`](#set_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - [`convert_to_indexed`](#convert_to_indexed) - [`verify_indexed_attestation`](#verify_indexed_attestation) @@ -1037,6 +1039,20 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: return value == root ``` +### `get_crosslink_committee_for_attestation` + +```python +def get_crosslink_committee_for_attestation(state: BeaconState, + attestation_data: AttestationData) -> List[ValidatorIndex]: + # Find the committee in the list with the desired shard + crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) + + assert attestation_data.shard in [shard for _, shard in crosslink_committees] + crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + + return crosslink_committee +``` + ### `get_attestation_participants` ```python @@ -1046,11 +1062,7 @@ def get_attestation_participants(state: BeaconState, """ Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. """ - # Find the committee in the list with the desired shard - crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - - assert attestation_data.shard in [shard for _, shard in crosslink_committees] - crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) assert verify_bitfield(bitfield, len(crosslink_committee)) @@ -1060,7 +1072,7 @@ def get_attestation_participants(state: BeaconState, aggregation_bit = get_bitfield_bit(bitfield, i) if aggregation_bit == 0b1: participants.append(validator_index) - return sorted(participants) + return participants ``` ### `int_to_bytes1`, `int_to_bytes2`, ... @@ -1130,6 +1142,22 @@ def get_bitfield_bit(bitfield: bytes, i: int) -> int: return (bitfield[i // 8] >> (i % 8)) % 2 ``` +### `set_bitfield_bit` + +```python +def set_bitfield_bit(bitfield: bytes, i: int) -> int: + """ + Set the bit in ``bitfield`` at position ``i`` to ``1``. 
+ """ + byte_index = i // 8 + bit_index = i % 8 + return ( + bitfield[:byte_index] + + bytes([bitfield[byte_index] | (1 << bit_index)]) + + bitfield[byte_index+1:] + ) +``` + ### `verify_bitfield` ```python @@ -1155,10 +1183,21 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation): """ Convert an attestation to (almost) indexed-verifiable form """ + attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + + # reconstruct custody bitfield for the truncated attesting_indices + custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) + custody_bitfield = b'\x00' * ((len(attesting_indices) + 7) // 8) + + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data) + for i, validator_index in enumerate(crosslink_committee): + if get_bitfield_bit(attestation.custody_bitfield, i): + custody_bitfield = set_bitfield_bit(custody_bitfield, attesting_indices.index(validator_index)) + return IndexedAttestation( - validator_indices=get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield), + validator_indices=attesting_indices, data=attestation.data, - custody_bitfield=attestation.custody_bitfield, + custody_bitfield=custody_bitfield, aggregate_signature=attestation.aggregate_signature ) ``` @@ -1176,9 +1215,6 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA if not (1 <= len(indexed_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): return False - if indexed_attestation.validator_indices != sorted(indexed_attestation.validator_indices): - return False - if not verify_bitfield(indexed_attestation.custody_bitfield, len(indexed_attestation.validator_indices)): return False @@ -2318,6 +2354,11 @@ def process_attester_slashing(state: BeaconState, is_double_vote(attestation1.data, attestation2.data) or is_surround_vote(attestation1.data, attestation2.data) ) + + # check that indices are sorted + assert attestation1.validator_indices == sorted(attestation1.validator_indices) + assert attestation2.validator_indices == sorted(attestation2.validator_indices) + assert verify_indexed_attestation(state, attestation1) assert verify_indexed_attestation(state, attestation2) slashable_indices = [ diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index 08cab11ff..ca6933ce7 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -135,7 +135,7 @@ def test_non_empty_custody_bitfield(state): attestation = get_valid_attestation(state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - attestation.custody_bitfield = b'\x01' + attestation.custody_bitfield[1:] + attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) pre_state, post_state = run_attestation_processing(state, attestation, False) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index d7f4ae6e8..08ea6ca04 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -22,12 +22,14 @@ from build.phase0.spec import ( get_active_validator_indices, get_attestation_participants, get_block_root, + get_crosslink_committee_for_attestation, get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_empty_block, get_epoch_start_slot, get_genesis_beacon_state, + slot_to_epoch, verify_merkle_branch, hash, ) @@ -248,12 +250,11 @@ def 
get_valid_attestation(state, slot=None): shard = state.latest_start_shard attestation_data = build_attestation_data(state, slot, shard) - crosslink_committees = get_crosslink_committees_at_slot(state, slot) - crosslink_committee = [committee for committee, _shard in crosslink_committees if _shard == attestation_data.shard][0] + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) committee_size = len(crosslink_committee) bitfield_length = (committee_size + 7) // 8 - aggregation_bitfield = b'\x01' + b'\x00' * (bitfield_length - 1) + aggregation_bitfield = b'\xC0' + b'\x00' * (bitfield_length - 1) custody_bitfield = b'\x00' * bitfield_length attestation = Attestation( aggregation_bitfield=aggregation_bitfield, @@ -266,23 +267,36 @@ def get_valid_attestation(state, slot=None): attestation.data, attestation.aggregation_bitfield, ) - assert len(participants) == 1 + assert len(participants) == 2 - validator_index = participants[0] - privkey = privkeys[validator_index] + signatures = [] + for validator_index in participants: + privkey = privkeys[validator_index] + signatures.append( + get_attestation_signature( + state, + attestation.data, + privkey + ) + ) + + attestation.aggregation_signature = bls.aggregate_signatures(signatures) + return attestation + + +def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0): message_hash = AttestationDataAndCustodyBit( - data=attestation.data, - custody_bit=0b0, + data=attestation_data, + custody_bit=custody_bit, ).hash_tree_root() - attestation.aggregation_signature = bls.sign( + return bls.sign( message_hash=message_hash, privkey=privkey, domain=get_domain( fork=state.fork, - epoch=get_current_epoch(state), + epoch=slot_to_epoch(attestation_data.slot), domain_type=spec.DOMAIN_ATTESTATION, ) ) - return attestation From 53e528e56c3ed3a8d7c8074a380023e275a0c2e9 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 00:28:20 +0800 Subject: [PATCH 117/481] work in progress: setup test libs, pyspec, pytests --- .gitignore | 6 +- {scripts => py_tests}/__init__.py | 0 {tests => py_tests}/conftest.py | 0 {tests => py_tests/phase0}/__init__.py | 0 .../test_process_attestation.py | 0 .../test_process_block_header.py | 0 .../block_processing/test_process_deposit.py | 0 .../test_process_proposer_slashing.py | 0 .../block_processing/test_voluntary_exit.py | 0 {tests => py_tests}/phase0/conftest.py | 0 {tests => py_tests}/phase0/helpers.py | 0 {tests => py_tests}/phase0/test_sanity.py | 0 requirements.txt => py_tests/requirements.txt | 0 test_libs/gen_helpers/README.txt | 4 + .../gen_helpers/gen_base}/__init__.py | 0 test_libs/gen_helpers/gen_base/gen_runner.py | 81 +++++++++++++++++++ test_libs/gen_helpers/gen_base/gen_suite.py | 17 ++++ test_libs/gen_helpers/gen_base/gen_typing.py | 5 ++ test_libs/gen_helpers/requirements.txt | 2 + test_libs/gen_helpers/setup.py | 8 ++ test_libs/pyspec/README.md | 3 + {utils => test_libs/pyspec/debug}/__init__.py | 0 .../pyspec/debug}/jsonize.py | 2 +- .../pyspec}/phase0/__init__.py | 0 .../pyspec}/phase0/state_transition.py | 0 test_libs/pyspec/requirements.txt | 4 + test_libs/pyspec/setup.py | 13 +++ test_libs/pyspec/utils/__init__.py | 0 .../pyspec/utils}/bls_stub.py | 0 .../pyspec/utils}/hash_function.py | 0 .../pyspec/utils}/merkle_minimal.py | 0 .../pyspec/utils}/minimal_ssz.py | 0 32 files changed, 143 insertions(+), 2 deletions(-) rename {scripts => py_tests}/__init__.py (100%) rename {tests => py_tests}/conftest.py (100%) rename {tests => 
py_tests/phase0}/__init__.py (100%) rename {tests => py_tests}/phase0/block_processing/test_process_attestation.py (100%) rename {tests => py_tests}/phase0/block_processing/test_process_block_header.py (100%) rename {tests => py_tests}/phase0/block_processing/test_process_deposit.py (100%) rename {tests => py_tests}/phase0/block_processing/test_process_proposer_slashing.py (100%) rename {tests => py_tests}/phase0/block_processing/test_voluntary_exit.py (100%) rename {tests => py_tests}/phase0/conftest.py (100%) rename {tests => py_tests}/phase0/helpers.py (100%) rename {tests => py_tests}/phase0/test_sanity.py (100%) rename requirements.txt => py_tests/requirements.txt (100%) create mode 100644 test_libs/gen_helpers/README.txt rename {tests/phase0 => test_libs/gen_helpers/gen_base}/__init__.py (100%) create mode 100644 test_libs/gen_helpers/gen_base/gen_runner.py create mode 100644 test_libs/gen_helpers/gen_base/gen_suite.py create mode 100644 test_libs/gen_helpers/gen_base/gen_typing.py create mode 100644 test_libs/gen_helpers/requirements.txt create mode 100644 test_libs/gen_helpers/setup.py create mode 100644 test_libs/pyspec/README.md rename {utils => test_libs/pyspec/debug}/__init__.py (100%) rename {utils/phase0 => test_libs/pyspec/debug}/jsonize.py (97%) rename {utils => test_libs/pyspec}/phase0/__init__.py (100%) rename {utils => test_libs/pyspec}/phase0/state_transition.py (100%) create mode 100644 test_libs/pyspec/requirements.txt create mode 100644 test_libs/pyspec/setup.py create mode 100644 test_libs/pyspec/utils/__init__.py rename {utils/phase0 => test_libs/pyspec/utils}/bls_stub.py (100%) rename {utils/phase0 => test_libs/pyspec/utils}/hash_function.py (100%) rename {utils/phase0 => test_libs/pyspec/utils}/merkle_minimal.py (100%) rename {utils/phase0 => test_libs/pyspec/utils}/minimal_ssz.py (100%) diff --git a/.gitignore b/.gitignore index f33dd5256..816ecfa26 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,11 @@ *.pyc /__pycache__ -/venv +venv +.venvs +.venv /.pytest_cache build/ output/ + +yaml_tests/ diff --git a/scripts/__init__.py b/py_tests/__init__.py similarity index 100% rename from scripts/__init__.py rename to py_tests/__init__.py diff --git a/tests/conftest.py b/py_tests/conftest.py similarity index 100% rename from tests/conftest.py rename to py_tests/conftest.py diff --git a/tests/__init__.py b/py_tests/phase0/__init__.py similarity index 100% rename from tests/__init__.py rename to py_tests/phase0/__init__.py diff --git a/tests/phase0/block_processing/test_process_attestation.py b/py_tests/phase0/block_processing/test_process_attestation.py similarity index 100% rename from tests/phase0/block_processing/test_process_attestation.py rename to py_tests/phase0/block_processing/test_process_attestation.py diff --git a/tests/phase0/block_processing/test_process_block_header.py b/py_tests/phase0/block_processing/test_process_block_header.py similarity index 100% rename from tests/phase0/block_processing/test_process_block_header.py rename to py_tests/phase0/block_processing/test_process_block_header.py diff --git a/tests/phase0/block_processing/test_process_deposit.py b/py_tests/phase0/block_processing/test_process_deposit.py similarity index 100% rename from tests/phase0/block_processing/test_process_deposit.py rename to py_tests/phase0/block_processing/test_process_deposit.py diff --git a/tests/phase0/block_processing/test_process_proposer_slashing.py b/py_tests/phase0/block_processing/test_process_proposer_slashing.py similarity index 100% rename from 
tests/phase0/block_processing/test_process_proposer_slashing.py rename to py_tests/phase0/block_processing/test_process_proposer_slashing.py diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/py_tests/phase0/block_processing/test_voluntary_exit.py similarity index 100% rename from tests/phase0/block_processing/test_voluntary_exit.py rename to py_tests/phase0/block_processing/test_voluntary_exit.py diff --git a/tests/phase0/conftest.py b/py_tests/phase0/conftest.py similarity index 100% rename from tests/phase0/conftest.py rename to py_tests/phase0/conftest.py diff --git a/tests/phase0/helpers.py b/py_tests/phase0/helpers.py similarity index 100% rename from tests/phase0/helpers.py rename to py_tests/phase0/helpers.py diff --git a/tests/phase0/test_sanity.py b/py_tests/phase0/test_sanity.py similarity index 100% rename from tests/phase0/test_sanity.py rename to py_tests/phase0/test_sanity.py diff --git a/requirements.txt b/py_tests/requirements.txt similarity index 100% rename from requirements.txt rename to py_tests/requirements.txt diff --git a/test_libs/gen_helpers/README.txt b/test_libs/gen_helpers/README.txt new file mode 100644 index 000000000..10c97f2d6 --- /dev/null +++ b/test_libs/gen_helpers/README.txt @@ -0,0 +1,4 @@ +ETH 2.0 test generator helpers + +`eth2_test_gen_base`: A util to quickly write new test suite generators with. + diff --git a/tests/phase0/__init__.py b/test_libs/gen_helpers/gen_base/__init__.py similarity index 100% rename from tests/phase0/__init__.py rename to test_libs/gen_helpers/gen_base/__init__.py diff --git a/test_libs/gen_helpers/gen_base/gen_runner.py b/test_libs/gen_helpers/gen_base/gen_runner.py new file mode 100644 index 000000000..ad729449a --- /dev/null +++ b/test_libs/gen_helpers/gen_base/gen_runner.py @@ -0,0 +1,81 @@ +import argparse +import pathlib +import sys +from typing import List + +from ruamel.yaml import ( + YAML, +) + +from gen_base.gen_typing import TestSuiteCreator + + +def make_filename_for_test(test): + title = test["title"] + filename = title.lower().replace(" ", "_") + ".yaml" + return pathlib.Path(filename) + + +def validate_output_dir(path_str): + path = pathlib.Path(path_str) + + if not path.exists(): + raise argparse.ArgumentTypeError("Output directory must exist") + + if not path.is_dir(): + raise argparse.ArgumentTypeError("Output path must lead to a directory") + + return path + + +def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): + """ + Implementation for a general test generator. + :param generator_name: The name of the generator. (lowercase snake_case) + :param suite_creators: A list of suite creators, each of these builds a list of test cases. 
+ :return: + """ + + parser = argparse.ArgumentParser( + prog="gen-" + generator_name, + description=f"Generate YAML test suite files for {generator_name}", + ) + parser.add_argument( + "-o", + "--output-dir", + dest="output_dir", + required=True, + type=validate_output_dir, + help="directory into which the generated YAML files will be dumped" + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="if set overwrite test files if they exist", + ) + + args = parser.parse_args() + output_dir = args.output_dir + if not args.force: + file_mode = "x" + else: + file_mode = "w" + + yaml = YAML(pure=True) + + print(f"Generating tests for {generator_name}, creating {len(suite_creators)} test suite files...") + for suite_creator in suite_creators: + suite = suite_creator() + + filename = make_filename_for_test(suite) + path = output_dir / filename + + try: + with path.open(file_mode) as f: + yaml.dump(suite, f) + except IOError as e: + sys.exit(f'Error when dumping test "{suite["title"]}" ({e})') + + print("done.") diff --git a/test_libs/gen_helpers/gen_base/gen_suite.py b/test_libs/gen_helpers/gen_base/gen_suite.py new file mode 100644 index 000000000..fdfac8292 --- /dev/null +++ b/test_libs/gen_helpers/gen_base/gen_suite.py @@ -0,0 +1,17 @@ +from typing import Iterable + +from eth_utils import ( + to_dict, +) + +from gen_base.gen_typing import TestCase + + +@to_dict +def render_suite(*, title: str, summary: str, fork: str, config: str, test_cases: Iterable[TestCase]): + yield "title", title + if summary is not None: + yield "summary", summary + yield "fork", fork + yield "config", config + yield "test_cases", test_cases diff --git a/test_libs/gen_helpers/gen_base/gen_typing.py b/test_libs/gen_helpers/gen_base/gen_typing.py new file mode 100644 index 000000000..1384c870f --- /dev/null +++ b/test_libs/gen_helpers/gen_base/gen_typing.py @@ -0,0 +1,5 @@ +from typing import Callable, Dict, Any + +TestCase = Dict[str, Any] +TestSuite = Dict[str, Any] +TestSuiteCreator = Callable[[], TestSuite] diff --git a/test_libs/gen_helpers/requirements.txt b/test_libs/gen_helpers/requirements.txt new file mode 100644 index 000000000..3d6a39458 --- /dev/null +++ b/test_libs/gen_helpers/requirements.txt @@ -0,0 +1,2 @@ +ruamel.yaml==0.15.87 +eth-utils==1.4.1 diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py new file mode 100644 index 000000000..a6c65c212 --- /dev/null +++ b/test_libs/gen_helpers/setup.py @@ -0,0 +1,8 @@ +from distutils.core import setup + +setup( + name='gen_helpers', + version='1.0', + packages=['gen_base'], + install_requires=['ruamel.yaml', 'eth-utils'] +) diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md new file mode 100644 index 000000000..2747ad8f3 --- /dev/null +++ b/test_libs/pyspec/README.md @@ -0,0 +1,3 @@ +# ETH 2.0 PySpec + +The py \ No newline at end of file diff --git a/utils/__init__.py b/test_libs/pyspec/debug/__init__.py similarity index 100% rename from utils/__init__.py rename to test_libs/pyspec/debug/__init__.py diff --git a/utils/phase0/jsonize.py b/test_libs/pyspec/debug/jsonize.py similarity index 97% rename from utils/phase0/jsonize.py rename to test_libs/pyspec/debug/jsonize.py index 816192ec6..ac0243a5d 100644 --- a/utils/phase0/jsonize.py +++ b/test_libs/pyspec/debug/jsonize.py @@ -1,4 +1,4 @@ -from .minimal_ssz import hash_tree_root +from shared_eth2.minimal_ssz import hash_tree_root def jsonize(value, typ, include_hash_tree_roots=False): diff --git a/utils/phase0/__init__.py 
b/test_libs/pyspec/phase0/__init__.py similarity index 100% rename from utils/phase0/__init__.py rename to test_libs/pyspec/phase0/__init__.py diff --git a/utils/phase0/state_transition.py b/test_libs/pyspec/phase0/state_transition.py similarity index 100% rename from utils/phase0/state_transition.py rename to test_libs/pyspec/phase0/state_transition.py diff --git a/test_libs/pyspec/requirements.txt b/test_libs/pyspec/requirements.txt new file mode 100644 index 000000000..78d41708d --- /dev/null +++ b/test_libs/pyspec/requirements.txt @@ -0,0 +1,4 @@ +eth-utils>=1.3.0,<2 +eth-typing>=2.1.0,<3.0.0 +pycryptodome==3.7.3 +py_ecc>=1.6.0 diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py new file mode 100644 index 000000000..5d121a263 --- /dev/null +++ b/test_libs/pyspec/setup.py @@ -0,0 +1,13 @@ +from distutils.core import setup + +setup( + name='pyspec', + version='1.0', + packages=['debug', 'utils', 'phase0'], + install_requires=[ + "eth-utils>=1.3.0,<2", + "eth-typing>=2.1.0,<3.0.0", + "pycryptodome==3.7.3", + "py_ecc>=1.6.0", + ] +) diff --git a/test_libs/pyspec/utils/__init__.py b/test_libs/pyspec/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/phase0/bls_stub.py b/test_libs/pyspec/utils/bls_stub.py similarity index 100% rename from utils/phase0/bls_stub.py rename to test_libs/pyspec/utils/bls_stub.py diff --git a/utils/phase0/hash_function.py b/test_libs/pyspec/utils/hash_function.py similarity index 100% rename from utils/phase0/hash_function.py rename to test_libs/pyspec/utils/hash_function.py diff --git a/utils/phase0/merkle_minimal.py b/test_libs/pyspec/utils/merkle_minimal.py similarity index 100% rename from utils/phase0/merkle_minimal.py rename to test_libs/pyspec/utils/merkle_minimal.py diff --git a/utils/phase0/minimal_ssz.py b/test_libs/pyspec/utils/minimal_ssz.py similarity index 100% rename from utils/phase0/minimal_ssz.py rename to test_libs/pyspec/utils/minimal_ssz.py From 64f012b276958c06e8ce7fc6ee33a4b24f260530 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 00:32:13 +0800 Subject: [PATCH 118/481] Move test-generators to specs repo Co-authored-by: Chih Cheng Liang Co-authored-by: Danny Ryan Co-authored-by: Dmitrii Shmatko Co-authored-by: Jannik Luhn Co-authored-by: Paul Hauner Co-authored-by: protolambda --- test_generators/README.md | 151 +++++++++++++++ test_generators/bls/README.md | 20 ++ test_generators/bls/main.py | 192 ++++++++++++++++++++ test_generators/bls/requirements.txt | 2 + test_generators/shuffling/README.md | 16 ++ test_generators/shuffling/constants.py | 6 + test_generators/shuffling/core_helpers.py | 95 ++++++++++ test_generators/shuffling/main.py | 160 ++++++++++++++++ test_generators/shuffling/requirements.txt | 4 + test_generators/shuffling/utils.py | 6 + test_generators/shuffling/yaml_objects.py | 25 +++ test_generators/ssz/__init__.py | 0 test_generators/ssz/main.py | 84 +++++++++ test_generators/ssz/renderers.py | 102 +++++++++++ test_generators/ssz/requirements.txt | 2 + test_generators/ssz/uint_test_generators.py | 132 ++++++++++++++ 16 files changed, 997 insertions(+) create mode 100644 test_generators/README.md create mode 100644 test_generators/bls/README.md create mode 100644 test_generators/bls/main.py create mode 100644 test_generators/bls/requirements.txt create mode 100644 test_generators/shuffling/README.md create mode 100644 test_generators/shuffling/constants.py create mode 100644 test_generators/shuffling/core_helpers.py create mode 100644 
test_generators/shuffling/main.py create mode 100644 test_generators/shuffling/requirements.txt create mode 100644 test_generators/shuffling/utils.py create mode 100644 test_generators/shuffling/yaml_objects.py create mode 100644 test_generators/ssz/__init__.py create mode 100644 test_generators/ssz/main.py create mode 100644 test_generators/ssz/renderers.py create mode 100644 test_generators/ssz/requirements.txt create mode 100644 test_generators/ssz/uint_test_generators.py diff --git a/test_generators/README.md b/test_generators/README.md new file mode 100644 index 000000000..2d6160c03 --- /dev/null +++ b/test_generators/README.md @@ -0,0 +1,151 @@ +# Eth2.0 Test Generators + +This directory of contains all the generators for YAML tests, consumed by Eth 2.0 client implementations. + +Any issues with the generators and/or generated tests should be filed + in the repository that hosts the generator outputs, here: [ethereum/eth2.0-tests](https://github.com/ethereum/eth2.0-tests/). + +Whenever a release is made, the new tests are automatically built and +[eth2TestGenBot](https://github.com/eth2TestGenBot) commits the changes to the test repository. + +## How to run generators + +pre-requisites: +- Python 3 installed +- PIP 3 +- GNU make + +### Cleaning + +This removes the existing virtual environments (`/test_generators/.venvs/`), and generated tests (`/yaml_tests/`). + +```bash +make clean +``` + +### Running all test generators + +This runs all the generators. + +```bash +make all +``` + +### Running a single generator + +The make file auto-detects generators in the `test_generators/` directory, + and provides a tests-gen target for each generator, see example. + +```bash +make ./tests/shuffling/ +``` + +## Developing a generator + +Simply open up the generator (not all at once) of choice in your favorite IDE/editor, and run: + +```bash +# Create a virtual environment (any venv/.venv/.venvs is git-ignored) +python3 -m venv .venv +# Activate the venv, this is where dependencies are installed for the generator +. .venv/bin/activate +``` + +Now that you have a virtual environment, write your generator. +It's recommended to extend the base-generator. + +Create a `requirements.txt` in the root of your generator directory: +``` +eth-utils==1.4.1 +../test_libs/gen_helpers +``` + +Install all the necessary requirements (re-run when you add more): +```bash +pip3 install -r requirements.txt --user +``` + +And write your initial test generator, extending the base generator: + +Write a `main.py` file, here's an example: + +```python +from gen_base import gen_runner, gen_suite, gen_typing + +from eth_utils import ( + to_dict, to_tuple +) + + +@to_dict +def bar_test_case(v: int): + yield "bar_v", v + yield "bar_v_plus_1", v + 1 + yield "bar_list", list(range(v)) + + +@to_tuple +def generate_bar_test_cases(): + for i in range(10): + yield bar_test_case(i) + + +def bar_test_suite() -> gen_typing.TestSuite: + return gen_suite.render_suite( + title="bar_minimal", + summary="Minimal example suite, testing bar.", + fork="v0.5.1", + config="minimal", + test_cases=generate_bar_test_cases()) + + +if __name__ == "__main__": + gen_runner.run_generator("foo", [bar_test_suite]) + +``` + +Recommendations: +- you can have more than just 1 generator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])` +- you can concatenate lists of test cases, if you don't want to split it up in suites. 
+- you can split your suite generators into different python files/packages, good for code organization. +- use config "minimal" for performance. But also implement a suite with the default config where necessary +- the test-generator accepts `--output` and `--force` (overwrite output) + +## How to add a new test generator + +In order to add a new test generator that builds `New Tests`: + +1. Create a new directory `new_tests`, within the `test_generators` directory. + Note that `new_tests` is also the name of the directory in which the tests will appear in the tests repository later. +2. Your generator is assumed to have a `requirements.txt` file, + with any dependencies it may need. Leave it empty if your generator has none. +3. Your generator is assumed to have a `main.py` file in its root. + By adding the base generator to your requirements, you can make a generator really easily. See docs below. +4. Your generator is called with `-o some/file/path/for_testing/can/be_anything`. + The base generator helps you handle this; you only have to define suite headers, + and a list of tests for each suite you generate. +5. Finally, add any linting or testing commands to the + [circleci config file](https://github.com/ethereum/eth2.0-test-generators/blob/master/.circleci/config.yml) + if desired to increase code quality. + +Note: you do not have to change the makefile. +However, if necessary (e.g. not using python, or mixing in other languages), submit an issue, and it can be a special case. +Do note that generators should be easy to maintain, lean, and based on the spec. + +All of this should be done in a pull request to the master branch. + +To deploy new tests to the testing repository: + +1. Create a release tag with a new version number on Github. +2. Increment either the: + - major version, to indicate a change in the general testing format + - minor version, if a new test generator has been added + - path version, in other cases. + +## How to remove a test generator + +If a test generator is not needed anymore, undo the steps described above and make a new release: + +1. remove the generator folder +2. remove the generated tests in the `eth2.0-tests` repository by opening a PR there. +3. make a new release diff --git a/test_generators/bls/README.md b/test_generators/bls/README.md new file mode 100644 index 000000000..9ce1b2f6c --- /dev/null +++ b/test_generators/bls/README.md @@ -0,0 +1,20 @@ +# BLS Test Generator + +Explanation of BLS12-381 type hierarchy +The base unit is bytes48 of which only 381 bits are used + +- FQ: uint381 modulo field modulus +- FQ2: (FQ, FQ) +- G2: (FQ2, FQ2, FQ2) + +## Resources + +- [Eth2.0 spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md) +- [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf) +- Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone +- [Zcash BLS parameters](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381) +- [Trinity implementation](https://github.com/ethereum/trinity/blob/master/eth2/_utils/bls.py) + +## Comments + +Compared to Zcash, Ethereum specs always requires the compressed form (c_flag / most significant bit always set). 
\ No newline at end of file diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py new file mode 100644 index 000000000..3c2ab454b --- /dev/null +++ b/test_generators/bls/main.py @@ -0,0 +1,192 @@ +""" +BLS test vectors generator +Usage: + "python tgen_bls path/to/output.yml" +""" + +# Standard library +import sys +from typing import Tuple + +# Third-party +import yaml + +# Ethereum +from eth_utils import int_to_big_endian, big_endian_to_int + +# Local imports +from py_ecc import bls + + +def int_to_hex(n: int) -> str: + return '0x' + int_to_big_endian(n).hex() + + +def hex_to_int(x: str) -> int: + return int(x, 16) + + +# Note: even though a domain is only an uint64, +# To avoid issues with YAML parsers that are limited to 53-bit (JS language limit) +# It is serialized as an hex string as well. +DOMAINS = [ + 0, + 1, + 1234, + 2**32-1, + 2**64-1 +] + +MESSAGES = [ + b'\x00' * 32, + b'\x56' * 32, + b'\xab' * 32, +] + +PRIVKEYS = [ + # Curve order is 256 so private keys are 32 bytes at most. + # Also not all integers is a valid private key, so using pre-generated keys + hex_to_int('0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3'), + hex_to_int('0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138'), + hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'), +] + + +def hash_message(msg: bytes, + domain: int) ->Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]: + """ + Hash message + Input: + - Message as bytes + - domain as uint64 + Output: + - Message hash as a G2 point + """ + return [ + [ + int_to_hex(fq2.coeffs[0]), + int_to_hex(fq2.coeffs[1]), + ] + for fq2 in bls.utils.hash_to_G2(msg, domain) + ] + + +def hash_message_compressed(msg: bytes, domain: int) -> Tuple[str, str]: + """ + Hash message + Input: + - Message as bytes + - domain as uint64 + Output: + - Message hash as a compressed G2 point + """ + z1, z2 = bls.utils.compress_G2(bls.utils.hash_to_G2(msg, domain)) + return [int_to_hex(z1), int_to_hex(z2)] + + +if __name__ == '__main__': + + # Order not preserved - https://github.com/yaml/pyyaml/issues/110 + metadata = { + 'title': 'BLS signature and aggregation tests', + 'summary': 'Test vectors for BLS signature', + 'test_suite': 'bls', + 'fork': 'phase0-0.5.0', + } + + case01_message_hash_G2_uncompressed = [] + for msg in MESSAGES: + for domain in DOMAINS: + case01_message_hash_G2_uncompressed.append({ + 'input': {'message': '0x' + msg.hex(), 'domain': int_to_hex(domain)}, + 'output': hash_message(msg, domain) + }) + + case02_message_hash_G2_compressed = [] + for msg in MESSAGES: + for domain in DOMAINS: + case02_message_hash_G2_compressed.append({ + 'input': {'message': '0x' + msg.hex(), 'domain': int_to_hex(domain)}, + 'output': hash_message_compressed(msg, domain) + }) + + case03_private_to_public_key = [] + #  Used in later cases + pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS] + #  Used in public key aggregation + pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys] + case03_private_to_public_key = [ + { + 'input': int_to_hex(privkey), + 'output': pubkey_serial, + } + for privkey, pubkey_serial in zip(PRIVKEYS, pubkeys_serial) + ] + + case04_sign_messages = [] + sigs = [] # used in verify + for privkey in PRIVKEYS: + for message in MESSAGES: + for domain in DOMAINS: + sig = bls.sign(message, privkey, domain) + case04_sign_messages.append({ + 'input': { + 'privkey': 
int_to_hex(privkey), + 'message': '0x' + message.hex(), + 'domain': int_to_hex(domain) + }, + 'output': '0x' + sig.hex() + }) + sigs.append(sig) + + # TODO: case05_verify_messages: Verify messages signed in case04 + # It takes too long, empty for now + + case06_aggregate_sigs = [] + for domain in DOMAINS: + for message in MESSAGES: + sigs = [] + for privkey in PRIVKEYS: + sig = bls.sign(message, privkey, domain) + sigs.append(sig) + case06_aggregate_sigs.append({ + 'input': ['0x' + sig.hex() for sig in sigs], + 'output': '0x' + bls.aggregate_signatures(sigs).hex(), + }) + + case07_aggregate_pubkeys = [ + { + 'input': pubkeys_serial, + 'output': '0x' + bls.aggregate_pubkeys(pubkeys).hex(), + } + ] + + # TODO + # Aggregate verify + + # TODO + # Proof-of-possession + + with open(sys.argv[2] + "test_bls.yml", 'w') as outfile: + # Dump at top level + yaml.dump(metadata, outfile, default_flow_style=False) + # default_flow_style will unravel "ValidatorRecord" and "committee" line, + # exploding file size + yaml.dump( + {'case01_message_hash_G2_uncompressed': case01_message_hash_G2_uncompressed}, + outfile, + ) + yaml.dump( + {'case02_message_hash_G2_compressed': case02_message_hash_G2_compressed}, + outfile, + ) + yaml.dump( + {'case03_private_to_public_key': case03_private_to_public_key}, + outfile, + ) + yaml.dump({'case04_sign_messages': case04_sign_messages}, outfile) + + # Too time consuming to generate + # yaml.dump({'case05_verify_messages': case05_verify_messages}, outfile) + yaml.dump({'case06_aggregate_sigs': case06_aggregate_sigs}, outfile) + yaml.dump({'case07_aggregate_pubkeys': case07_aggregate_pubkeys}, outfile) diff --git a/test_generators/bls/requirements.txt b/test_generators/bls/requirements.txt new file mode 100644 index 000000000..3989a3a0f --- /dev/null +++ b/test_generators/bls/requirements.txt @@ -0,0 +1,2 @@ +py-ecc==1.6.0 +PyYAML==4.2b1 diff --git a/test_generators/shuffling/README.md b/test_generators/shuffling/README.md new file mode 100644 index 000000000..047a1b872 --- /dev/null +++ b/test_generators/shuffling/README.md @@ -0,0 +1,16 @@ +# Shuffling Test Generator + +``` +2018 Status Research & Development GmbH +Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). + +This work uses public domain work under CC0 from the Ethereum Foundation +https://github.com/ethereum/eth2.0-specs +``` + + +This file implements a test vectors generator for the shuffling algorithm described in the Ethereum +[specs](https://github.com/ethereum/eth2.0-specs/blob/2983e68f0305551083fac7fcf9330c1fc9da3411/specs/core/0_beacon-chain.md#get_new_shuffling) + +Utilizes 'swap or not' shuffling found in [An Enciphering Scheme Based on a Card Shuffle](https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf). +See the `Generalized domain` algorithm on page 3. 
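As a quick usage sketch for the shuffling generator introduced above — assuming it is run from `test_generators/shuffling/` with that generator's requirements installed, so that `core_helpers.py` and `utils.py` from this patch are importable — the swap-or-not routine can be checked to produce a genuine permutation, which is what lets `get_shuffling` split the shuffled indices into committees without duplicates:

```python
from core_helpers import get_permuted_index

seed = bytes(range(32))  # any 32-byte seed works for this check
list_size = 16
permutation = [get_permuted_index(index, list_size, seed) for index in range(list_size)]

# Every input index maps to a distinct output index, i.e. a bijection on 0..list_size-1.
assert sorted(permutation) == list(range(list_size))
print(permutation)
```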
diff --git a/test_generators/shuffling/constants.py b/test_generators/shuffling/constants.py new file mode 100644 index 000000000..92862f898 --- /dev/null +++ b/test_generators/shuffling/constants.py @@ -0,0 +1,6 @@ +SLOTS_PER_EPOCH = 2**6 # 64 slots, 6.4 minutes +FAR_FUTURE_EPOCH = 2**64 - 1 # uint64 max +SHARD_COUNT = 2**10 # 1024 +TARGET_COMMITTEE_SIZE = 2**7 # 128 validators +ACTIVATION_EXIT_DELAY = 2**2 # 4 epochs +SHUFFLE_ROUND_COUNT = 90 diff --git a/test_generators/shuffling/core_helpers.py b/test_generators/shuffling/core_helpers.py new file mode 100644 index 000000000..c424b771e --- /dev/null +++ b/test_generators/shuffling/core_helpers.py @@ -0,0 +1,95 @@ +from typing import Any, List, NewType + +from constants import SLOTS_PER_EPOCH, SHARD_COUNT, TARGET_COMMITTEE_SIZE, SHUFFLE_ROUND_COUNT +from utils import hash +from yaml_objects import Validator + +Epoch = NewType("Epoch", int) +ValidatorIndex = NewType("ValidatorIndex", int) +Bytes32 = NewType("Bytes32", bytes) + + +def int_to_bytes1(x): + return x.to_bytes(1, 'little') + + +def int_to_bytes4(x): + return x.to_bytes(4, 'little') + + +def bytes_to_int(data: bytes) -> int: + return int.from_bytes(data, 'little') + + +def is_active_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is active. + """ + return validator.activation_epoch <= epoch < validator.exit_epoch + + +def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]: + """ + Get indices of active validators from ``validators``. + """ + return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] + + +def split(values: List[Any], split_count: int) -> List[List[Any]]: + """ + Splits ``values`` into ``split_count`` pieces. + """ + list_length = len(values) + return [ + values[(list_length * i // split_count): (list_length * (i + 1) // split_count)] + for i in range(split_count) + ] + + +def get_epoch_committee_count(active_validator_count: int) -> int: + """ + Return the number of committees in one epoch. + """ + return max( + 1, + min( + SHARD_COUNT // SLOTS_PER_EPOCH, + active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, + ) + ) * SLOTS_PER_EPOCH + + +def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: + """ + Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. + + Utilizes 'swap or not' shuffling found in + https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf + See the 'generalized domain' algorithm on page 3. + """ + for round in range(SHUFFLE_ROUND_COUNT): + pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size + flip = (pivot - index) % list_size + position = max(index, flip) + source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256)) + byte = source[(position % 256) // 8] + bit = (byte >> (position % 8)) % 2 + index = flip if bit else index + + return index + + +def get_shuffling(seed: Bytes32, + validators: List[Validator], + epoch: Epoch) -> List[List[ValidatorIndex]]: + """ + Shuffle active validators and split into crosslink committees. + Return a list of committees (each a list of validator indices). 
+ """ + # Shuffle active validator indices + active_validator_indices = get_active_validator_indices(validators, epoch) + length = len(active_validator_indices) + shuffled_indices = [active_validator_indices[get_permuted_index(i, length, seed)] for i in range(length)] + + # Split the shuffled active validator indices + return split(shuffled_indices, get_epoch_committee_count(length)) diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py new file mode 100644 index 000000000..03352944a --- /dev/null +++ b/test_generators/shuffling/main.py @@ -0,0 +1,160 @@ +import random +import sys +import os + +import yaml + +from constants import ACTIVATION_EXIT_DELAY, FAR_FUTURE_EPOCH +from core_helpers import get_shuffling +from yaml_objects import Validator + + +def noop(self, *args, **kw): + # Prevent !!str or !!binary tags + pass + + +yaml.emitter.Emitter.process_tag = noop + + +EPOCH = 1000 # The epoch, also a mean for the normal distribution + +# Standard deviation, around 8% validators will activate or exit within +# ENTRY_EXIT_DELAY inclusive from EPOCH thus creating an edge case for validator +# shuffling +RAND_EPOCH_STD = 35 +MAX_EXIT_EPOCH = 5000 # Maximum exit_epoch for easier reading + + +def active_exited_validators_generator(): + """ + Random cases with variety of validator's activity status + """ + # Order not preserved - https://github.com/yaml/pyyaml/issues/110 + metadata = { + 'title': 'Shuffling Algorithm Tests 1', + 'summary': 'Test vectors for validator shuffling with different validator\'s activity status.' + ' Note: only relevant validator fields are defined.', + 'test_suite': 'shuffle', + 'fork': 'phase0-0.5.0', + } + + # Config + random.seed(int("0xEF00BEAC", 16)) + num_cases = 10 + + test_cases = [] + + for case in range(num_cases): + seedhash = bytes(random.randint(0, 255) for byte in range(32)) + idx_max = random.randint(128, 512) + + validators = [] + for idx in range(idx_max): + v = Validator(original_index=idx) + # 4/5 of all validators are active + if random.random() < 0.8: + # Choose a normally distributed epoch number + rand_epoch = round(random.gauss(EPOCH, RAND_EPOCH_STD)) + + # for 1/2 of *active* validators rand_epoch is the activation epoch + if random.random() < 0.5: + v.activation_epoch = rand_epoch + + # 1/4 of active validators will exit in forseeable future + if random.random() < 0.5: + v.exit_epoch = random.randint( + rand_epoch + ACTIVATION_EXIT_DELAY + 1, MAX_EXIT_EPOCH) + # 1/4 of active validators in theory remain in the set indefinitely + else: + v.exit_epoch = FAR_FUTURE_EPOCH + # for the other active 1/2 rand_epoch is the exit epoch + else: + v.activation_epoch = random.randint( + 0, rand_epoch - ACTIVATION_EXIT_DELAY) + v.exit_epoch = rand_epoch + + # The remaining 1/5 of all validators is not activated + else: + v.activation_epoch = FAR_FUTURE_EPOCH + v.exit_epoch = FAR_FUTURE_EPOCH + + validators.append(v) + + input_ = { + 'validators': validators, + 'epoch': EPOCH + } + output = get_shuffling( + seedhash, validators, input_['epoch']) + + test_cases.append({ + 'seed': '0x' + seedhash.hex(), 'input': input_, 'output': output + }) + + return { + 'metadata': metadata, + 'filename': 'test_vector_shuffling.yml', + 'test_cases': test_cases + } + + +def validators_set_size_variety_generator(): + """ + Different validator set size cases, inspired by removed manual `permutated_index` tests + https://github.com/ethereum/eth2.0-test-generators/tree/bcd9ab2933d9f696901d1dfda0828061e9d3093f/permutated_index + """ + # Order not 
preserved - https://github.com/yaml/pyyaml/issues/110 + metadata = { + 'title': 'Shuffling Algorithm Tests 2', + 'summary': 'Test vectors for validator shuffling with different validator\'s set size.' + ' Note: only relevant validator fields are defined.', + 'test_suite': 'shuffle', + 'fork': 'tchaikovsky', + 'version': 1.0 + } + + # Config + random.seed(int("0xEF00BEAC", 16)) + + test_cases = [] + + seedhash = bytes(random.randint(0, 255) for byte in range(32)) + idx_max = 4096 + set_sizes = [1, 2, 3, 1024, idx_max] + + for size in set_sizes: + validators = [] + for idx in range(size): + v = Validator(original_index=idx) + v.activation_epoch = EPOCH + v.exit_epoch = FAR_FUTURE_EPOCH + validators.append(v) + input_ = { + 'validators': validators, + 'epoch': EPOCH + } + output = get_shuffling( + seedhash, validators, input_['epoch']) + + test_cases.append({ + 'seed': '0x' + seedhash.hex(), 'input': input_, 'output': output + }) + + return { + 'metadata': metadata, + 'filename': 'shuffling_set_size.yml', + 'test_cases': test_cases + } + + +if __name__ == '__main__': + output_dir = sys.argv[2] + for generator in [active_exited_validators_generator, validators_set_size_variety_generator]: + result = generator() + filename = os.path.join(output_dir, result['filename']) + with open(filename, 'w') as outfile: + # Dump at top level + yaml.dump(result['metadata'], outfile, default_flow_style=False) + # default_flow_style will unravel "ValidatorRecord" and "committee" line, exploding file size + yaml.dump({'test_cases': result['test_cases']}, outfile) diff --git a/test_generators/shuffling/requirements.txt b/test_generators/shuffling/requirements.txt new file mode 100644 index 000000000..dde2fb67d --- /dev/null +++ b/test_generators/shuffling/requirements.txt @@ -0,0 +1,4 @@ +eth-hash[pycryptodome]==0.2.0 +eth-typing==2.0.0 +eth-utils==1.4.1 +PyYAML==4.2b1 diff --git a/test_generators/shuffling/utils.py b/test_generators/shuffling/utils.py new file mode 100644 index 000000000..bcd2c6a3c --- /dev/null +++ b/test_generators/shuffling/utils.py @@ -0,0 +1,6 @@ +from eth_typing import Hash32 +from eth_utils import keccak + + +def hash(x: bytes) -> Hash32: + return keccak(x) diff --git a/test_generators/shuffling/yaml_objects.py b/test_generators/shuffling/yaml_objects.py new file mode 100644 index 000000000..18e45220e --- /dev/null +++ b/test_generators/shuffling/yaml_objects.py @@ -0,0 +1,25 @@ +from typing import Any + +import yaml + + +class Validator(yaml.YAMLObject): + """ + A validator stub containing only the fields relevant for get_shuffling() + """ + fields = { + 'activation_epoch': 'uint64', + 'exit_epoch': 'uint64', + # Extra index field to ease testing/debugging + 'original_index': 'uint64', + } + + def __init__(self, **kwargs): + for k in self.fields.keys(): + setattr(self, k, kwargs.get(k)) + + def __setattr__(self, name: str, value: Any) -> None: + super().__setattr__(name, value) + + def __getattribute__(self, name: str) -> Any: + return super().__getattribute__(name) diff --git a/test_generators/ssz/__init__.py b/test_generators/ssz/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_generators/ssz/main.py b/test_generators/ssz/main.py new file mode 100644 index 000000000..d19ec12b4 --- /dev/null +++ b/test_generators/ssz/main.py @@ -0,0 +1,84 @@ +import argparse +import pathlib +import sys + +from ruamel.yaml import ( + YAML, +) + +from uint_test_generators import ( + generate_uint_bounds_test, + generate_uint_random_test, + generate_uint_wrong_length_test, +) 
+ +test_generators = [ + generate_uint_random_test, + generate_uint_wrong_length_test, + generate_uint_bounds_test, +] + + +def make_filename_for_test(test): + title = test["title"] + filename = title.lower().replace(" ", "_") + ".yaml" + return pathlib.Path(filename) + + +def validate_output_dir(path_str): + path = pathlib.Path(path_str) + + if not path.exists(): + raise argparse.ArgumentTypeError("Output directory must exist") + + if not path.is_dir(): + raise argparse.ArgumentTypeError("Output path must lead to a directory") + + return path + + +parser = argparse.ArgumentParser( + prog="gen-ssz-tests", + description="Generate YAML test files for SSZ and tree hashing", +) +parser.add_argument( + "-o", + "--output-dir", + dest="output_dir", + required=True, + type=validate_output_dir, + help="directory into which the generated YAML files will be dumped" +) +parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="if set overwrite test files if they exist", +) + + +if __name__ == "__main__": + args = parser.parse_args() + output_dir = args.output_dir + if not args.force: + file_mode = "x" + else: + file_mode = "w" + + yaml = YAML(pure=True) + + print(f"generating {len(test_generators)} test files...") + for test_generator in test_generators: + test = test_generator() + + filename = make_filename_for_test(test) + path = output_dir / filename + + try: + with path.open(file_mode) as f: + yaml.dump(test, f) + except IOError as e: + sys.exit(f'Error when dumping test "{test["title"]}" ({e})') + + print("done.") diff --git a/test_generators/ssz/renderers.py b/test_generators/ssz/renderers.py new file mode 100644 index 000000000..e551ab14c --- /dev/null +++ b/test_generators/ssz/renderers.py @@ -0,0 +1,102 @@ +from collections.abc import ( + Mapping, + Sequence, +) + +from eth_utils import ( + encode_hex, + to_dict, +) + +from ssz.sedes import ( + BaseSedes, + Boolean, + Bytes, + BytesN, + Container, + List, + UInt, +) + + +def render_value(value): + if isinstance(value, bool): + return value + elif isinstance(value, int): + return str(value) + elif isinstance(value, bytes): + return encode_hex(value) + elif isinstance(value, Sequence): + return tuple(render_value(element) for element in value) + elif isinstance(value, Mapping): + return render_dict_value(value) + else: + raise ValueError(f"Cannot render value {value}") + + +@to_dict +def render_dict_value(value): + for key, value in value.items(): + yield key, render_value(value) + + +def render_type_definition(sedes): + if isinstance(sedes, Boolean): + return "bool" + + elif isinstance(sedes, UInt): + return f"uint{sedes.length * 8}" + + elif isinstance(sedes, BytesN): + return f"bytes{sedes.length}" + + elif isinstance(sedes, Bytes): + return f"bytes" + + elif isinstance(sedes, List): + return [render_type_definition(sedes.element_sedes)] + + elif isinstance(sedes, Container): + return { + field_name: render_type_definition(field_sedes) + for field_name, field_sedes in sedes.fields + } + + elif isinstance(sedes, BaseSedes): + raise Exception("Unreachable: All sedes types have been checked") + + else: + raise TypeError("Expected BaseSedes") + + +@to_dict +def render_test_case(*, sedes, valid, value=None, serial=None, description=None, tags=None): + value_and_serial_given = value is not None and serial is not None + if valid: + if not value_and_serial_given: + raise ValueError("For valid test cases, both value and ssz must be present") + else: + if value_and_serial_given: + raise ValueError("For invalid test 
cases, either value or ssz must not be present") + + if tags is None: + tags = [] + + yield "type", render_type_definition(sedes) + yield "valid", valid + if value is not None: + yield "value", render_value(value) + if serial is not None: + yield "ssz", encode_hex(serial) + if description is not None: + yield description + yield "tags", tags + + +@to_dict +def render_test(*, title, summary, fork, test_cases): + yield "title", title, + if summary is not None: + yield "summary", summary + yield "fork", fork + yield "test_cases", test_cases diff --git a/test_generators/ssz/requirements.txt b/test_generators/ssz/requirements.txt new file mode 100644 index 000000000..88193a01d --- /dev/null +++ b/test_generators/ssz/requirements.txt @@ -0,0 +1,2 @@ +ruamel.yaml==0.15.87 +ssz==0.1.0a2 diff --git a/test_generators/ssz/uint_test_generators.py b/test_generators/ssz/uint_test_generators.py new file mode 100644 index 000000000..c8c841fe7 --- /dev/null +++ b/test_generators/ssz/uint_test_generators.py @@ -0,0 +1,132 @@ +import random + +from eth_utils import ( + to_tuple, +) + +import ssz +from ssz.sedes import ( + UInt, +) +from renderers import ( + render_test, + render_test_case, +) + +random.seed(0) + + +BIT_SIZES = [i for i in range(8, 512 + 1, 8)] +RANDOM_TEST_CASES_PER_BIT_SIZE = 10 +RANDOM_TEST_CASES_PER_LENGTH = 3 + + +def get_random_bytes(length): + return bytes(random.randint(0, 255) for _ in range(length)) + + +def generate_uint_bounds_test(): + test_cases = generate_uint_bounds_test_cases() + generate_uint_out_of_bounds_test_cases() + + return render_test( + title="UInt Bounds", + summary="Integers right at or beyond the bounds of the allowed value range", + fork="phase0-0.2.0", + test_cases=test_cases, + ) + + +def generate_uint_random_test(): + test_cases = generate_random_uint_test_cases() + + return render_test( + title="UInt Random", + summary="Random integers chosen uniformly over the allowed value range", + fork="phase0-0.2.0", + test_cases=test_cases, + ) + + +def generate_uint_wrong_length_test(): + test_cases = generate_uint_wrong_length_test_cases() + + return render_test( + title="UInt Wrong Length", + summary="Serialized integers that are too short or too long", + fork="phase0-0.2.0", + test_cases=test_cases, + ) + + +@to_tuple +def generate_random_uint_test_cases(): + for bit_size in BIT_SIZES: + sedes = UInt(bit_size) + + for _ in range(RANDOM_TEST_CASES_PER_BIT_SIZE): + value = random.randrange(0, 2 ** bit_size) + serial = ssz.encode(value, sedes) + # note that we need to create the tags in each loop cycle, otherwise ruamel will use + # YAML references which makes the resulting file harder to read + tags = tuple(["atomic", "uint", "random"]) + yield render_test_case( + sedes=sedes, + valid=True, + value=value, + serial=serial, + tags=tags, + ) + + +@to_tuple +def generate_uint_wrong_length_test_cases(): + for bit_size in BIT_SIZES: + sedes = UInt(bit_size) + lengths = sorted({ + 0, + sedes.length // 2, + sedes.length - 1, + sedes.length + 1, + sedes.length * 2, + }) + for length in lengths: + for _ in range(RANDOM_TEST_CASES_PER_LENGTH): + tags = tuple(["atomic", "uint", "wrong_length"]) + yield render_test_case( + sedes=sedes, + valid=False, + serial=get_random_bytes(length), + tags=tags, + ) + + +@to_tuple +def generate_uint_bounds_test_cases(): + common_tags = ("atomic", "uint") + for bit_size in BIT_SIZES: + sedes = UInt(bit_size) + + for value, tag in ((0, "uint_lower_bound"), (2 ** bit_size - 1, "uint_upper_bound")): + serial = ssz.encode(value, sedes) + yield 
render_test_case( + sedes=sedes, + valid=True, + value=value, + serial=serial, + tags=common_tags + (tag,), + ) + + +@to_tuple +def generate_uint_out_of_bounds_test_cases(): + common_tags = ("atomic", "uint") + for bit_size in BIT_SIZES: + sedes = UInt(bit_size) + + for value, tag in ((-1, "uint_underflow"), (2 ** bit_size, "uint_overflow")): + yield render_test_case( + sedes=sedes, + valid=False, + value=value, + tags=common_tags + (tag,), + ) From c7da23e6dadb3c1e7cc75d97d1dc0e7237198ecc Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 00:35:46 +0800 Subject: [PATCH 119/481] update ci config and makefile --- .circleci/config.yml | 95 +++++++++++++++++++++++++++++++++----------- Makefile | 73 +++++++++++++++++++++++++++------- 2 files changed, 130 insertions(+), 38 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 02871530e..411eb9230 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,5 +1,4 @@ -# Python CircleCI 2.0 configuration file -version: 2 +version: 2.1 jobs: build: docker: @@ -8,34 +7,82 @@ jobs: steps: - checkout - # Download and cache dependencies - - restore_cache: - keys: - - v1-dependencies-{{ checksum "requirements.txt" }} - # fallback to using the latest cache if no exact match is found - - v1-dependencies- + - run: + name: Build phase0 spec + command: make phase0 - run: - name: install dependencies - command: | - python3 -m venv venv - . venv/bin/activate - pip install -r requirements.txt - - run: - name: build phase0 spec - command: make build/phase0 - - - save_cache: - paths: - - ./venv - key: v1-dependencies-{{ checksum "requirements.txt" }} - - - run: - name: run tests + name: run py-tests command: | . venv/bin/activate pytest tests + - run: + name: Generate YAML tests + command: make yaml_tests + - store_artifacts: path: test-reports destination: test-reports + + - run: + name: Save YAML tests for deployment + command: | + mkdir /tmp/workspace + cp -r yaml_tests /tmp/workspace/ + git log -1 >> /tmp/workspace/latest_commit_message + - persist_to_workspace: + root: /tmp/workspace + paths: + - yaml_tests + - latest_commit_message + commit: + docker: + - image: circleci/python:3.6 + steps: + - attach_workspace: + at: /tmp/workspace + - add_ssh_keys: + fingerprints: + - "01:85:b6:36:96:a6:84:72:e4:9b:4e:38:ee:21:97:fa" + - run: + name: Checkout test repository + command: | + ssh-keyscan -H github.com >> ~/.ssh/known_hosts + git clone git@github.com:ethereum/eth2.0-tests.git + - run: + name: Commit and push generated YAML tests + command: | + cd eth2.0-tests + git config user.name 'eth2TestGenBot' + git config user.email '47188154+eth2TestGenBot@users.noreply.github.com' + for filename in /tmp/workspace/yaml_tests/*; do + rm -rf $(basename $filename) + cp -r $filename . + done + git add . 
+ if git diff --cached --exit-code >& /dev/null; then + echo "No changes to commit" + else + echo -e "Update generated tests\n\nLatest commit message from eth2.0-specs:\n" > commit_message + cat /tmp/workspace/latest_commit_message >> commit_message + git commit -F commit_message + git push origin master + fi +workflows: + version: 2.1 + + build_and_commit: + jobs: + - build: + filters: + tags: + only: /.*/ + - commit: + requires: + - build + filters: + tags: + only: /.*/ + branches: + ignore: /.*/ \ No newline at end of file diff --git a/Makefile b/Makefile index 88f17dcf9..f6313f64d 100644 --- a/Makefile +++ b/Makefile @@ -1,29 +1,74 @@ SPEC_DIR = ./specs SCRIPT_DIR = ./scripts -BUILD_DIR = ./build -UTILS_DIR = ./utils +TEST_LIBS_DIR = ./test_libs +PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec + +YAML_TEST_DIR = ./yaml_tests +GENERATOR_DIR = ./test_generators +GENERATOR_VENVS_DIR = $(GENERATOR_DIR)/.venvs + +# Collect a list of generator names +GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/))) +# Map this list of generator paths to a list of test output paths +YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS)) + +PY_SPEC_PHASE_0_TARGET = $(PY_SPEC_DIR)/phase0/spec.py +PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGET) -.PHONY: clean all test - - -all: $(BUILD_DIR)/phase0 +.PHONY: clean all test yaml_tests pyspec phase0 +all: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) $(PY_SPEC_ALL_TARGETS) clean: - rm -rf $(BUILD_DIR) + rm -rf $(YAML_TEST_DIR) + rm -rf $(GENERATOR_VENVS_DIR) + rm -rf $(PY_SPEC_ALL_TARGETS) +# "make yaml_tests" to run generators +yaml_tests: $(YAML_TEST_TARGETS) # runs a limited set of tests against a minimal config # run pytest with `-m` option to full suite -test: +test: $(PY_SPEC_TARGETS) pytest -m minimal_config tests/ +# "make pyspec" to create the pyspec for all phases. +pyspec: $(PY_SPEC_TARGETS) -$(BUILD_DIR)/phase0: - mkdir -p $@ +# "make phase0" to create pyspec for phase0 +phase0: $(PY_SPEC_DIR)/phase0/spec.py + + +$(PY_SPEC_DIR)/phase0/spec.py: python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py - mkdir -p $@/utils - cp $(UTILS_DIR)/phase0/* $@/utils - cp $(UTILS_DIR)/phase0/state_transition.py $@ - touch $@/__init__.py $@/utils/__init__.py + + + +# The function that builds a set of suite files, by calling a generator for the given type (param 1) +define build_yaml_tests + $(info running generator $(1)) + # Create the output + mkdir -p $(YAML_TEST_DIR)$(1) + + # Create a virtual environment + python3 -m venv $(VENV_DIR)$(1) + # Activate the venv, this is where dependencies are installed for the generator + . $(GENERATOR_VENVS_DIR)$(1)bin/activate + # Install all the necessary requirements + pip3 install -r $(GENERATOR_DIR)$(1)requirements.txt --user + + # Run the generator. The generator is assumed to have an "main.py" file. + # We output to the tests dir (generator program should accept a "-p " argument. + python3 $(GENERATOR_DIR)$(1)main.py -o $(YAML_TEST_DIR)$(1) + $(info generator $(1) finished) +endef + +# The tests dir itself is simply build by creating the directory (recursively creating deeper directories if necessary) +$(YAML_TEST_DIR): + $(info ${YAML_TEST_TARGETS}) + mkdir -p $@ + +# For any target within the tests dir, build it using the build_yaml_tests function. 
+$(YAML_TEST_DIR)%:
+	$(call build_yaml_tests,$*)

From 645682553d5e9320b4a82062f531ae79613e3bda Mon Sep 17 00:00:00 2001
From: protolambda
Date: Thu, 28 Mar 2019 00:36:15 +0800
Subject: [PATCH 120/481] Update readme with links to spec contributor docs

---
 README.md | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index c5c88daf9..294dd439d 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,8 @@ To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https:/
 
 This repo hosts the current eth2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed upon changes to spec can be made through pull requests.
 
-# Specs
+
+## Specs
 
 Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
 * [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md)
@@ -18,10 +19,20 @@ Accompanying documents can be found in [specs](specs) and include
 * [General test format](specs/test-format.md)
 * [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md)
 
-## Design goals
+
+### Design goals
+
 The following are the broad design goals for Ethereum 2.0:
 * to minimize complexity, even at the cost of some losses in efficiency
 * to remain live through major network partitions and when very large portions of nodes go offline
 * to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available
 * to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time
 * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain)
+
+
+## For spec contributors
+
+Documentation on the different components used during spec writing can be found here:
+* [YAML Test Generators](test_generators/README.md)
+* [Executable Python Spec](test_libs/pyspec/README.md)
+

From bf951688b08126d526eac45234da30f49f21b1bd Mon Sep 17 00:00:00 2001
From: protolambda
Date: Thu, 28 Mar 2019 00:43:56 +0800
Subject: [PATCH 121/481] update pyspec readme

---
 test_libs/pyspec/README.md | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md
index 2747ad8f3..25ee737f7 100644
--- a/test_libs/pyspec/README.md
+++ b/test_libs/pyspec/README.md
@@ -1,3 +1,17 @@
 # ETH 2.0 PySpec
 
-The py
\ No newline at end of file
+The Python executable spec is built from the ETH 2.0 specification,
+ complemented with the necessary helper functions for hashing, BLS, and more.
+
+With this executable spec,
+ test-generators can easily create test-vectors for client implementations,
+ and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest.
+
+Contributions are welcome, but consider implementing your idea as part of the spec itself first.
+The pyspec is not a replacement for the spec.
+If you see an opportunity to include any of the `utils/` code in the spec,
+ please submit an issue or PR.
+
+## License
+
+Same as the spec itself; see the LICENSE file in the spec repository root.
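As a concrete complement to the README above: once the pyspec has been built (e.g. via the `make phase0` target from the Makefile changes earlier in this series), it can be imported and exercised directly. The snippet below is a minimal editorial sketch, not part of any patch here; it assumes the `eth2.phase0.spec` package layout introduced in the following patches and that `test_libs/pyspec` is importable on the Python path.

```python
# Minimal sketch: exercise a few helpers exposed by the generated executable spec.
from eth2.phase0 import spec

# Build an empty block at the genesis slot and check a couple of invariants
# that follow directly from the spec's constants.
block = spec.get_empty_block()
assert block.slot == spec.GENESIS_SLOT
assert spec.slot_to_epoch(block.slot) == spec.GENESIS_EPOCH
```

Anything more involved (full state transitions, deposits with Merkle proofs) is covered by the pytest sanity tests and the YAML test generators referenced above.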
From 883ea93545f8ca061523b966c48683ef049eae20 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 00:53:20 +0800 Subject: [PATCH 122/481] update generator helper readme --- test_libs/gen_helpers/README.md | 5 +++++ test_libs/gen_helpers/README.txt | 4 ---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 test_libs/gen_helpers/README.md delete mode 100644 test_libs/gen_helpers/README.txt diff --git a/test_libs/gen_helpers/README.md b/test_libs/gen_helpers/README.md new file mode 100644 index 000000000..4dcfacef7 --- /dev/null +++ b/test_libs/gen_helpers/README.md @@ -0,0 +1,5 @@ +# ETH 2.0 test generator helpers + +`gen_base`: A util to quickly write new test suite generators with. +See [Generators documentation](../../test_generators/README.md). + diff --git a/test_libs/gen_helpers/README.txt b/test_libs/gen_helpers/README.txt deleted file mode 100644 index 10c97f2d6..000000000 --- a/test_libs/gen_helpers/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -ETH 2.0 test generator helpers - -`eth2_test_gen_base`: A util to quickly write new test suite generators with. - From a106edacadf8a1724ae039c73ab76775d049c2b1 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 01:21:07 +0800 Subject: [PATCH 123/481] Scope pyspec packages, make pyspec more readable, fix imports --- Makefile | 10 +- scripts/phase0/build_spec.py | 27 +- scripts/phase0/function_puller.py | 4 + test_libs/gen_helpers/setup.py | 5 +- test_libs/pyspec/{debug => eth2}/__init__.py | 0 .../pyspec/{phase0 => eth2/debug}/__init__.py | 0 test_libs/pyspec/{ => eth2}/debug/jsonize.py | 2 +- .../pyspec/{utils => eth2/phase0}/__init__.py | 0 test_libs/pyspec/eth2/phase0/spec.py | 1692 +++++++++++++++++ .../{ => eth2}/phase0/state_transition.py | 6 +- test_libs/pyspec/eth2/utils/__init__.py | 0 test_libs/pyspec/{ => eth2}/utils/bls_stub.py | 0 .../pyspec/{ => eth2}/utils/hash_function.py | 0 .../pyspec/{ => eth2}/utils/merkle_minimal.py | 0 .../pyspec/{ => eth2}/utils/minimal_ssz.py | 0 test_libs/pyspec/setup.py | 2 +- 16 files changed, 1728 insertions(+), 20 deletions(-) rename test_libs/pyspec/{debug => eth2}/__init__.py (100%) rename test_libs/pyspec/{phase0 => eth2/debug}/__init__.py (100%) rename test_libs/pyspec/{ => eth2}/debug/jsonize.py (97%) rename test_libs/pyspec/{utils => eth2/phase0}/__init__.py (100%) create mode 100644 test_libs/pyspec/eth2/phase0/spec.py rename test_libs/pyspec/{ => eth2}/phase0/state_transition.py (97%) create mode 100644 test_libs/pyspec/eth2/utils/__init__.py rename test_libs/pyspec/{ => eth2}/utils/bls_stub.py (100%) rename test_libs/pyspec/{ => eth2}/utils/hash_function.py (100%) rename test_libs/pyspec/{ => eth2}/utils/merkle_minimal.py (100%) rename test_libs/pyspec/{ => eth2}/utils/minimal_ssz.py (100%) diff --git a/Makefile b/Makefile index f6313f64d..3812a8255 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/))) # Map this list of generator paths to a list of test output paths YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS)) -PY_SPEC_PHASE_0_TARGET = $(PY_SPEC_DIR)/phase0/spec.py -PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGET) +PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2/phase0/spec.py +PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) .PHONY: clean all test yaml_tests pyspec phase0 @@ -37,11 +37,11 @@ test: $(PY_SPEC_TARGETS) pyspec: $(PY_SPEC_TARGETS) # "make phase0" to create pyspec for phase0 -phase0: $(PY_SPEC_DIR)/phase0/spec.py +phase0: 
$(PY_SPEC_PHASE_0_TARGETS) -$(PY_SPEC_DIR)/phase0/spec.py: - python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py +$(PY_SPEC_DIR)/eth2/phase0/spec.py: + python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@ diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index 6116f1ffe..9f33c5883 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -4,15 +4,8 @@ import function_puller def build_spec(sourcefile, outfile): code_lines = [] - - code_lines.append("from build.phase0.utils.minimal_ssz import *") - code_lines.append("from build.phase0.utils.bls_stub import *") - for i in (1, 2, 3, 4, 8, 32, 48, 96): - code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i)) - code_lines.append("SLOTS_PER_EPOCH = 64") # stub, will get overwritten by real var - code_lines.append("def slot_to_epoch(x): return x // SLOTS_PER_EPOCH") - code_lines.append(""" + from typing import ( Any, Callable, @@ -20,6 +13,20 @@ from typing import ( NewType, Tuple, ) +from eth2.utils.minimal_ssz import * +from eth2.utils.bls_stub import * + + + """) + for i in (1, 2, 3, 4, 8, 32, 48, 96): + code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i)) + + code_lines.append(""" +# stub, will get overwritten by real var +SLOTS_PER_EPOCH = 64 + + +def slot_to_epoch(x): return x // SLOTS_PER_EPOCH Slot = NewType('Slot', int) # uint64 @@ -40,6 +47,8 @@ Store = None # Monkey patch validator get committee code _compute_committee = compute_committee committee_cache = {} + + def compute_committee(validator_indices: List[ValidatorIndex], seed: Bytes32, index: int, @@ -60,6 +69,8 @@ def compute_committee(validator_indices: List[ValidatorIndex], # Monkey patch hash cache _hash = hash hash_cache = {} + + def hash(x): if x in hash_cache: return hash_cache[x] diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py index 7d5796fc7..2cd0139c5 100644 --- a/scripts/phase0/function_puller.py +++ b/scripts/phase0/function_puller.py @@ -27,6 +27,10 @@ def get_lines(file_name): code_lines.append('%s = SSZType({' % current_name) processing_typedef = True elif pulling_from is not None: + # Add some whitespace between functions + if line[:3] == 'def': + code_lines.append("") + code_lines.append("") code_lines.append(line) elif pulling_from is None and len(line) > 0 and line[0] == '|': row = line[1:].split('|') diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py index a6c65c212..88b971bf3 100644 --- a/test_libs/gen_helpers/setup.py +++ b/test_libs/gen_helpers/setup.py @@ -4,5 +4,8 @@ setup( name='gen_helpers', version='1.0', packages=['gen_base'], - install_requires=['ruamel.yaml', 'eth-utils'] + install_requires=[ + "ruamel.yaml==0.15.87", + "eth-utils==1.4.1" + ] ) diff --git a/test_libs/pyspec/debug/__init__.py b/test_libs/pyspec/eth2/__init__.py similarity index 100% rename from test_libs/pyspec/debug/__init__.py rename to test_libs/pyspec/eth2/__init__.py diff --git a/test_libs/pyspec/phase0/__init__.py b/test_libs/pyspec/eth2/debug/__init__.py similarity index 100% rename from test_libs/pyspec/phase0/__init__.py rename to test_libs/pyspec/eth2/debug/__init__.py diff --git a/test_libs/pyspec/debug/jsonize.py b/test_libs/pyspec/eth2/debug/jsonize.py similarity index 97% rename from test_libs/pyspec/debug/jsonize.py rename to test_libs/pyspec/eth2/debug/jsonize.py index ac0243a5d..660e6b070 100644 --- a/test_libs/pyspec/debug/jsonize.py +++ 
b/test_libs/pyspec/eth2/debug/jsonize.py @@ -1,4 +1,4 @@ -from shared_eth2.minimal_ssz import hash_tree_root +from eth2.utils.minimal_ssz import hash_tree_root def jsonize(value, typ, include_hash_tree_roots=False): diff --git a/test_libs/pyspec/utils/__init__.py b/test_libs/pyspec/eth2/phase0/__init__.py similarity index 100% rename from test_libs/pyspec/utils/__init__.py rename to test_libs/pyspec/eth2/phase0/__init__.py diff --git a/test_libs/pyspec/eth2/phase0/spec.py b/test_libs/pyspec/eth2/phase0/spec.py new file mode 100644 index 000000000..32dc8e743 --- /dev/null +++ b/test_libs/pyspec/eth2/phase0/spec.py @@ -0,0 +1,1692 @@ +from eth2.utils.minimal_ssz import * +from eth2.utils.bls_stub import * +def int_to_bytes1(x): return x.to_bytes(1, 'little') +def int_to_bytes2(x): return x.to_bytes(2, 'little') +def int_to_bytes3(x): return x.to_bytes(3, 'little') +def int_to_bytes4(x): return x.to_bytes(4, 'little') +def int_to_bytes8(x): return x.to_bytes(8, 'little') +def int_to_bytes32(x): return x.to_bytes(32, 'little') +def int_to_bytes48(x): return x.to_bytes(48, 'little') +def int_to_bytes96(x): return x.to_bytes(96, 'little') + + +SLOTS_PER_EPOCH = 64 +def slot_to_epoch(x): return x // SLOTS_PER_EPOCH + + +from typing import ( + Any, + Callable, + List, + NewType, + Tuple, +) + + +Slot = NewType('Slot', int) # uint64 +Epoch = NewType('Epoch', int) # uint64 +Shard = NewType('Shard', int) # uint64 +ValidatorIndex = NewType('ValidatorIndex', int) # uint64 +Gwei = NewType('Gwei', int) # uint64 +Bytes32 = NewType('Bytes32', bytes) # bytes32 +BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 +BLSSignature = NewType('BLSSignature', bytes) # bytes96 +Any = None +Store = None + +SHARD_COUNT = 2**10 +TARGET_COMMITTEE_SIZE = 2**7 +MAX_BALANCE_CHURN_QUOTIENT = 2**5 +MAX_SLASHABLE_ATTESTATION_PARTICIPANTS = 2**12 +MAX_EXIT_DEQUEUES_PER_EPOCH = 2**2 +SHUFFLE_ROUND_COUNT = 90 +DEPOSIT_CONTRACT_ADDRESS = 0x1234567890123567890123456789012357890 +DEPOSIT_CONTRACT_TREE_DEPTH = 2**5 +MIN_DEPOSIT_AMOUNT = 2**0 * 10**9 +MAX_DEPOSIT_AMOUNT = 2**5 * 10**9 +EJECTION_BALANCE = 2**4 * 10**9 +HIGH_BALANCE_INCREMENT = 2**0 * 10**9 +GENESIS_FORK_VERSION = int_to_bytes4(0) +GENESIS_SLOT = 2**32 +GENESIS_EPOCH = slot_to_epoch(GENESIS_SLOT) +GENESIS_START_SHARD = 0 +FAR_FUTURE_EPOCH = 2**64 - 1 +ZERO_HASH = int_to_bytes32(0) +EMPTY_SIGNATURE = int_to_bytes96(0) +BLS_WITHDRAWAL_PREFIX_BYTE = int_to_bytes1(0) +SECONDS_PER_SLOT = 6 +MIN_ATTESTATION_INCLUSION_DELAY = 2**2 +SLOTS_PER_EPOCH = 2**6 +MIN_SEED_LOOKAHEAD = 2**0 +ACTIVATION_EXIT_DELAY = 2**2 +EPOCHS_PER_ETH1_VOTING_PERIOD = 2**4 +SLOTS_PER_HISTORICAL_ROOT = 2**13 +MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8 +PERSISTENT_COMMITTEE_PERIOD = 2**11 +MAX_CROSSLINK_EPOCHS = 2**6 +LATEST_RANDAO_MIXES_LENGTH = 2**13 +LATEST_ACTIVE_INDEX_ROOTS_LENGTH = 2**13 +LATEST_SLASHED_EXIT_LENGTH = 2**13 +BASE_REWARD_QUOTIENT = 2**5 +WHISTLEBLOWER_REWARD_QUOTIENT = 2**9 +ATTESTATION_INCLUSION_REWARD_QUOTIENT = 2**3 +INACTIVITY_PENALTY_QUOTIENT = 2**24 +MIN_PENALTY_QUOTIENT = 2**5 +MAX_PROPOSER_SLASHINGS = 2**4 +MAX_ATTESTER_SLASHINGS = 2**0 +MAX_ATTESTATIONS = 2**7 +MAX_DEPOSITS = 2**4 +MAX_VOLUNTARY_EXITS = 2**4 +MAX_TRANSFERS = 2**4 +DOMAIN_BEACON_BLOCK = 0 +DOMAIN_RANDAO = 1 +DOMAIN_ATTESTATION = 2 +DOMAIN_DEPOSIT = 3 +DOMAIN_VOLUNTARY_EXIT = 4 +DOMAIN_TRANSFER = 5 +Fork = SSZType({ + # Previous fork version + 'previous_version': 'bytes4', + # Current fork version + 'current_version': 'bytes4', + # Fork epoch number + 'epoch': 'uint64', +}) +Crosslink = SSZType({ + # Epoch 
number + 'epoch': 'uint64', + # Shard data since the previous crosslink + 'crosslink_data_root': 'bytes32', +}) +Eth1Data = SSZType({ + # Root of the deposit tree + 'deposit_root': 'bytes32', + # Total number of deposits + 'deposit_count': 'uint64', + # Block hash + 'block_hash': 'bytes32', +}) +Eth1DataVote = SSZType({ + # Data being voted for + 'eth1_data': Eth1Data, + # Vote count + 'vote_count': 'uint64', +}) +AttestationData = SSZType({ + # LMD GHOST vote + 'slot': 'uint64', + 'beacon_block_root': 'bytes32', + + # FFG vote + 'source_epoch': 'uint64', + 'source_root': 'bytes32', + 'target_root': 'bytes32', + + # Crosslink vote + 'shard': 'uint64', + 'previous_crosslink': Crosslink, + 'crosslink_data_root': 'bytes32', +}) +AttestationDataAndCustodyBit = SSZType({ + # Attestation data + 'data': AttestationData, + # Custody bit + 'custody_bit': 'bool', +}) +SlashableAttestation = SSZType({ + # Validator indices + 'validator_indices': ['uint64'], + # Attestation data + 'data': AttestationData, + # Custody bitfield + 'custody_bitfield': 'bytes', + # Aggregate signature + 'aggregate_signature': 'bytes96', +}) +DepositData = SSZType({ + # BLS pubkey + 'pubkey': 'bytes48', + # Withdrawal credentials + 'withdrawal_credentials': 'bytes32', + # Amount in Gwei + 'amount': 'uint64', + # Container self-signature + 'proof_of_possession': 'bytes96', +}) +BeaconBlockHeader = SSZType({ + 'slot': 'uint64', + 'previous_block_root': 'bytes32', + 'state_root': 'bytes32', + 'block_body_root': 'bytes32', + 'signature': 'bytes96', +}) +Validator = SSZType({ + # BLS public key + 'pubkey': 'bytes48', + # Withdrawal credentials + 'withdrawal_credentials': 'bytes32', + # Epoch when validator activated + 'activation_epoch': 'uint64', + # Epoch when validator exited + 'exit_epoch': 'uint64', + # Epoch when validator is eligible to withdraw + 'withdrawable_epoch': 'uint64', + # Did the validator initiate an exit + 'initiated_exit': 'bool', + # Was the validator slashed + 'slashed': 'bool', + # Rounded balance + 'high_balance': 'uint64' +}) +PendingAttestation = SSZType({ + # Attester aggregation bitfield + 'aggregation_bitfield': 'bytes', + # Attestation data + 'data': AttestationData, + # Custody bitfield + 'custody_bitfield': 'bytes', + # Inclusion slot + 'inclusion_slot': 'uint64', +}) +HistoricalBatch = SSZType({ + # Block roots + 'block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + # State roots + 'state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], +}) +ProposerSlashing = SSZType({ + # Proposer index + 'proposer_index': 'uint64', + # First block header + 'header_1': BeaconBlockHeader, + # Second block header + 'header_2': BeaconBlockHeader, +}) +AttesterSlashing = SSZType({ + # First slashable attestation + 'slashable_attestation_1': SlashableAttestation, + # Second slashable attestation + 'slashable_attestation_2': SlashableAttestation, +}) +Attestation = SSZType({ + # Attester aggregation bitfield + 'aggregation_bitfield': 'bytes', + # Attestation data + 'data': AttestationData, + # Custody bitfield + 'custody_bitfield': 'bytes', + # BLS aggregate signature + 'aggregate_signature': 'bytes96', +}) +Deposit = SSZType({ + # Branch in the deposit tree + 'proof': ['bytes32', DEPOSIT_CONTRACT_TREE_DEPTH], + # Index in the deposit tree + 'index': 'uint64', + # Data + 'data': DepositData, +}) +VoluntaryExit = SSZType({ + # Minimum epoch for processing exit + 'epoch': 'uint64', + # Index of the exiting validator + 'validator_index': 'uint64', + # Validator signature + 'signature': 'bytes96', +}) +Transfer = 
SSZType({ + # Sender index + 'sender': 'uint64', + # Recipient index + 'recipient': 'uint64', + # Amount in Gwei + 'amount': 'uint64', + # Fee in Gwei for block proposer + 'fee': 'uint64', + # Inclusion slot + 'slot': 'uint64', + # Sender withdrawal pubkey + 'pubkey': 'bytes48', + # Sender signature + 'signature': 'bytes96', +}) +BeaconBlockBody = SSZType({ + 'randao_reveal': 'bytes96', + 'eth1_data': Eth1Data, + 'proposer_slashings': [ProposerSlashing], + 'attester_slashings': [AttesterSlashing], + 'attestations': [Attestation], + 'deposits': [Deposit], + 'voluntary_exits': [VoluntaryExit], + 'transfers': [Transfer], +}) +BeaconBlock = SSZType({ + # Header + 'slot': 'uint64', + 'previous_block_root': 'bytes32', + 'state_root': 'bytes32', + 'body': BeaconBlockBody, + 'signature': 'bytes96', +}) +BeaconState = SSZType({ + # Misc + 'slot': 'uint64', + 'genesis_time': 'uint64', + 'fork': Fork, # For versioning hard forks + + # Validator registry + 'validator_registry': [Validator], + 'balances': ['uint64'], + 'validator_registry_update_epoch': 'uint64', + + # Randomness and committees + 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], + 'latest_start_shard': 'uint64', + + # Finality + 'previous_epoch_attestations': [PendingAttestation], + 'current_epoch_attestations': [PendingAttestation], + 'previous_justified_epoch': 'uint64', + 'current_justified_epoch': 'uint64', + 'previous_justified_root': 'bytes32', + 'current_justified_root': 'bytes32', + 'justification_bitfield': 'uint64', + 'finalized_epoch': 'uint64', + 'finalized_root': 'bytes32', + + # Recent state + 'latest_crosslinks': [Crosslink, SHARD_COUNT], + 'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + 'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], + 'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH], + 'latest_slashed_balances': ['uint64', LATEST_SLASHED_EXIT_LENGTH], # Balances slashed at every withdrawal period + 'latest_block_header': BeaconBlockHeader, # `latest_block_header.state_root == ZERO_HASH` temporarily + 'historical_roots': ['bytes32'], + + # Ethereum 1.0 chain data + 'latest_eth1_data': Eth1Data, + 'eth1_data_votes': [Eth1DataVote], + 'deposit_index': 'uint64', +}) + + +def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: + return bytes(a ^ b for a, b in zip(bytes1, bytes2)) + + +def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: + """ + Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. + """ + return BeaconBlockHeader( + slot=block.slot, + previous_block_root=block.previous_block_root, + state_root=ZERO_HASH, + block_body_root=hash_tree_root(block.body), + # signed_root(block) is used for block id purposes so signature is a stub + signature=EMPTY_SIGNATURE, + ) + + +def slot_to_epoch(slot: Slot) -> Epoch: + """ + Return the epoch number of the given ``slot``. + """ + return slot // SLOTS_PER_EPOCH + + +def get_previous_epoch(state: BeaconState) -> Epoch: + """` + Return the previous epoch of the given ``state``. + """ + return get_current_epoch(state) - 1 + + +def get_current_epoch(state: BeaconState) -> Epoch: + """ + Return the current epoch of the given ``state``. + """ + return slot_to_epoch(state.slot) + + +def get_epoch_start_slot(epoch: Epoch) -> Slot: + """ + Return the starting slot of the given ``epoch``. + """ + return epoch * SLOTS_PER_EPOCH + + +def is_active_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is active. 
+ """ + return validator.activation_epoch <= epoch < validator.exit_epoch + + +def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: + """ + Check if ``validator`` is slashable. + """ + return ( + validator.activation_epoch <= epoch < validator.withdrawable_epoch and + validator.slashed is False + ) + + +def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]: + """ + Get indices of active validators from ``validators``. + """ + return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] + + +def get_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: + """ + Return the balance for a validator with the given ``index``. + """ + return state.balances[index] + + +def set_balance(state: BeaconState, index: ValidatorIndex, balance: Gwei) -> None: + """ + Set the balance for a validator with the given ``index`` in both ``BeaconState`` + and validator's rounded balance ``high_balance``. + """ + validator = state.validator_registry[index] + HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 + if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: + validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT + state.balances[index] = balance + + +def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Increase the balance for a validator with the given ``index`` by ``delta``. + """ + set_balance(state, index, get_balance(state, index) + delta) + + +def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: + """ + Decrease the balance for a validator with the given ``index`` by ``delta``. + Set to ``0`` when underflow. + """ + current_balance = get_balance(state, index) + set_balance(state, index, current_balance - delta if current_balance >= delta else 0) + + +def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: + """ + Return `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy. + + Utilizes 'swap or not' shuffling found in + https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf + See the 'generalized domain' algorithm on page 3. + """ + assert index < list_size + assert list_size <= 2**40 + + for round in range(SHUFFLE_ROUND_COUNT): + pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size + flip = (pivot - index) % list_size + position = max(index, flip) + source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256)) + byte = source[(position % 256) // 8] + bit = (byte >> (position % 8)) % 2 + index = flip if bit else index + + return index + + +def get_split_offset(list_size: int, chunks: int, index: int) -> int: + """ + Returns a value such that for a list L, chunk count k and index i, + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] + """ + return (list_size * index) // chunks + + +def get_epoch_committee_count(active_validator_count: int) -> int: + """ + Return the number of committees in one epoch. + """ + return max( + 1, + min( + SHARD_COUNT // SLOTS_PER_EPOCH, + active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, + ) + ) * SLOTS_PER_EPOCH + + +def compute_committee(validator_indices: List[ValidatorIndex], + seed: Bytes32, + index: int, + total_committees: int) -> List[ValidatorIndex]: + """ + Return the ``index``'th shuffled committee out of a total ``total_committees`` + using ``validator_indices`` and ``seed``. 
+ """ + start_offset = get_split_offset(len(validator_indices), total_committees, index) + end_offset = get_split_offset(len(validator_indices), total_committees, index + 1) + return [ + validator_indices[get_permuted_index(i, len(validator_indices), seed)] + for i in range(start_offset, end_offset) + ] + + +def get_current_epoch_committee_count(state: BeaconState) -> int: + """ + Return the number of committees in the current epoch of the given ``state``. + """ + current_active_validators = get_active_validator_indices( + state.validator_registry, + get_current_epoch(state), + ) + return get_epoch_committee_count(len(current_active_validators)) + + +def get_crosslink_committees_at_slot(state: BeaconState, + slot: Slot) -> List[Tuple[List[ValidatorIndex], Shard]]: + """ + Return the list of ``(committee, shard)`` tuples for the ``slot``. + """ + epoch = slot_to_epoch(slot) + current_epoch = get_current_epoch(state) + previous_epoch = get_previous_epoch(state) + next_epoch = current_epoch + 1 + + assert previous_epoch <= epoch <= next_epoch + indices = get_active_validator_indices( + state.validator_registry, + epoch, + ) + committees_per_epoch = get_epoch_committee_count(len(indices)) + + if epoch == current_epoch: + start_shard = state.latest_start_shard + elif epoch == previous_epoch: + start_shard = (state.latest_start_shard - committees_per_epoch) % SHARD_COUNT + elif epoch == next_epoch: + current_epoch_committees = get_current_epoch_committee_count(state) + start_shard = (state.latest_start_shard + current_epoch_committees) % SHARD_COUNT + + committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH + offset = slot % SLOTS_PER_EPOCH + slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT + seed = generate_seed(state, epoch) + + return [ + ( + compute_committee(indices, seed, committees_per_slot * offset + i, committees_per_epoch), + (slot_start_shard + i) % SHARD_COUNT, + ) + for i in range(committees_per_slot) + ] + + +def get_block_root(state: BeaconState, + slot: Slot) -> Bytes32: + """ + Return the block root at a recent ``slot``. + """ + assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT + return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] + + +def get_state_root(state: BeaconState, + slot: Slot) -> Bytes32: + """ + Return the state root at a recent ``slot``. + """ + assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT + return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] + + +def get_randao_mix(state: BeaconState, + epoch: Epoch) -> Bytes32: + """ + Return the randao mix at a recent ``epoch``. + """ + assert get_current_epoch(state) - LATEST_RANDAO_MIXES_LENGTH < epoch <= get_current_epoch(state) + return state.latest_randao_mixes[epoch % LATEST_RANDAO_MIXES_LENGTH] + + +def get_active_index_root(state: BeaconState, + epoch: Epoch) -> Bytes32: + """ + Return the index root at a recent ``epoch``. + """ + assert get_current_epoch(state) - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY < epoch <= get_current_epoch(state) + ACTIVATION_EXIT_DELAY + return state.latest_active_index_roots[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] + + +def generate_seed(state: BeaconState, + epoch: Epoch) -> Bytes32: + """ + Generate a seed for the given ``epoch``. 
+ """ + return hash( + get_randao_mix(state, epoch - MIN_SEED_LOOKAHEAD) + + get_active_index_root(state, epoch) + + int_to_bytes32(epoch) + ) + + +def get_beacon_proposer_index(state: BeaconState, + slot: Slot) -> ValidatorIndex: + """ + Return the beacon proposer index for the ``slot``. + Due to proposer selection being based upon the validator balances during + the epoch in question, this can only be run for the current epoch. + """ + current_epoch = get_current_epoch(state) + assert slot_to_epoch(slot) == current_epoch + + first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0] + i = 0 + while True: + rand_byte = hash( + generate_seed(state, current_epoch) + + int_to_bytes8(i // 32) + )[i % 32] + candidate = first_committee[(current_epoch + i) % len(first_committee)] + if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: + return candidate + i += 1 + + +def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool: + """ + Verify that the given ``leaf`` is on the merkle branch ``proof`` + starting with the given ``root``. + """ + value = leaf + for i in range(depth): + if index // (2**i) % 2: + value = hash(proof[i] + value) + else: + value = hash(value + proof[i]) + return value == root + + +def get_attestation_participants(state: BeaconState, + attestation_data: AttestationData, + bitfield: bytes) -> List[ValidatorIndex]: + """ + Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. + """ + # Find the committee in the list with the desired shard + crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) + + assert attestation_data.shard in [shard for _, shard in crosslink_committees] + crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + + assert verify_bitfield(bitfield, len(crosslink_committee)) + + # Find the participating attesters in the committee + participants = [] + for i, validator_index in enumerate(crosslink_committee): + aggregation_bit = get_bitfield_bit(bitfield, i) + if aggregation_bit == 0b1: + participants.append(validator_index) + return participants + + +def bytes_to_int(data: bytes) -> int: + return int.from_bytes(data, 'little') + + +def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: + """ + Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. + """ + return min(get_balance(state, index), MAX_DEPOSIT_AMOUNT) + + +def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> Gwei: + """ + Return the combined effective balance of an array of ``validators``. + """ + return sum([get_effective_balance(state, i) for i in validators]) + + +def get_fork_version(fork: Fork, + epoch: Epoch) -> bytes: + """ + Return the fork version of the given ``epoch``. + """ + if epoch < fork.epoch: + return fork.previous_version + else: + return fork.current_version + + +def get_domain(fork: Fork, + epoch: Epoch, + domain_type: int) -> int: + """ + Get the domain number that represents the fork meta and signature domain. + """ + return bytes_to_int(get_fork_version(fork, epoch) + int_to_bytes4(domain_type)) + + +def get_bitfield_bit(bitfield: bytes, i: int) -> int: + """ + Extract the bit in ``bitfield`` at position ``i``. 
+ """ + return (bitfield[i // 8] >> (i % 8)) % 2 + + +def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: + """ + Verify ``bitfield`` against the ``committee_size``. + """ + if len(bitfield) != (committee_size + 7) // 8: + return False + + # Check `bitfield` is padded with zero bits only + for i in range(committee_size, len(bitfield) * 8): + if get_bitfield_bit(bitfield, i) == 0b1: + return False + + return True + + +def verify_slashable_attestation(state: BeaconState, slashable_attestation: SlashableAttestation) -> bool: + """ + Verify validity of ``slashable_attestation`` fields. + """ + if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + return False + + if not (1 <= len(slashable_attestation.validator_indices) <= MAX_SLASHABLE_ATTESTATION_PARTICIPANTS): + return False + + for i in range(len(slashable_attestation.validator_indices) - 1): + if slashable_attestation.validator_indices[i] >= slashable_attestation.validator_indices[i + 1]: + return False + + if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)): + return False + + custody_bit_0_indices = [] + custody_bit_1_indices = [] + for i, validator_index in enumerate(slashable_attestation.validator_indices): + if get_bitfield_bit(slashable_attestation.custody_bitfield, i) == 0b0: + custody_bit_0_indices.append(validator_index) + else: + custody_bit_1_indices.append(validator_index) + + return bls_verify_multiple( + pubkeys=[ + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_indices]), + bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), + ], + message_hashes=[ + hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b0)), + hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b1)), + ], + signature=slashable_attestation.aggregate_signature, + domain=get_domain(state.fork, slot_to_epoch(slashable_attestation.data.slot), DOMAIN_ATTESTATION), + ) + + +def is_double_vote(attestation_data_1: AttestationData, + attestation_data_2: AttestationData) -> bool: + """ + Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. + """ + target_epoch_1 = slot_to_epoch(attestation_data_1.slot) + target_epoch_2 = slot_to_epoch(attestation_data_2.slot) + return target_epoch_1 == target_epoch_2 + + +def is_surround_vote(attestation_data_1: AttestationData, + attestation_data_2: AttestationData) -> bool: + """ + Check if ``attestation_data_1`` surrounds ``attestation_data_2``. + """ + source_epoch_1 = attestation_data_1.source_epoch + source_epoch_2 = attestation_data_2.source_epoch + target_epoch_1 = slot_to_epoch(attestation_data_1.slot) + target_epoch_2 = slot_to_epoch(attestation_data_2.slot) + + return source_epoch_1 < source_epoch_2 and target_epoch_2 < target_epoch_1 + + +def integer_squareroot(n: int) -> int: + """ + The largest integer ``x`` such that ``x**2`` is less than or equal to ``n``. + """ + assert n >= 0 + x = n + y = (x + 1) // 2 + while y < x: + x = y + y = (x + n // x) // 2 + return x + + +def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: + """ + Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. + """ + return epoch + 1 + ACTIVATION_EXIT_DELAY + + +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + """ + Process a deposit from Ethereum 1.0. 
+ Note that this function mutates ``state``. + """ + # Deposits must be processed in order + assert deposit.index == state.deposit_index + + # Verify the Merkle branch + merkle_branch_is_valid = verify_merkle_branch( + leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization + proof=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH, + index=deposit.index, + root=state.latest_eth1_data.deposit_root, + ) + assert merkle_branch_is_valid + + # Increment the next deposit index we are expecting. Note that this + # needs to be done here because while the deposit contract will never + # create an invalid Merkle branch, it may admit an invalid deposit + # object, and we need to be able to skip over it + state.deposit_index += 1 + + validator_pubkeys = [v.pubkey for v in state.validator_registry] + pubkey = deposit.data.pubkey + amount = deposit.data.amount + + if pubkey not in validator_pubkeys: + # Verify the proof of possession + proof_is_valid = bls_verify( + pubkey=pubkey, + message_hash=signed_root(deposit.data), + signature=deposit.data.proof_of_possession, + domain=get_domain( + state.fork, + get_current_epoch(state), + DOMAIN_DEPOSIT, + ) + ) + if not proof_is_valid: + return + + # Add new validator + validator = Validator( + pubkey=pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + initiated_exit=False, + slashed=False, + high_balance=0 + ) + + # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. + state.validator_registry.append(validator) + state.balances.append(0) + set_balance(state, len(state.validator_registry) - 1, amount) + else: + # Increase balance by deposit amount + index = validator_pubkeys.index(pubkey) + increase_balance(state, index, amount) + + +def activate_validator(state: BeaconState, index: ValidatorIndex, is_genesis: bool) -> None: + """ + Activate the validator of the given ``index``. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + + validator.activation_epoch = GENESIS_EPOCH if is_genesis else get_delayed_activation_exit_epoch(get_current_epoch(state)) + + +def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: + """ + Initiate the validator of the given ``index``. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + validator.initiated_exit = True + + +def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: + """ + Exit the validator with the given ``index``. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + + # Update validator exit epoch if not previously exited + if validator.exit_epoch == FAR_FUTURE_EPOCH: + validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + + +def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: + """ + Slash the validator with index ``index``. + Note that this function mutates ``state``. 
+ """ + validator = state.validator_registry[index] + exit_validator(state, index) + state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) + + whistleblower_index = get_beacon_proposer_index(state, state.slot) + whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT + increase_balance(state, whistleblower_index, whistleblower_reward) + decrease_balance(state, index, whistleblower_reward) + validator.slashed = True + validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH + + +def prepare_validator_for_withdrawal(state: BeaconState, index: ValidatorIndex) -> None: + """ + Set the validator with the given ``index`` as withdrawable + ``MIN_VALIDATOR_WITHDRAWABILITY_DELAY`` after the current epoch. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[index] + validator.withdrawable_epoch = get_current_epoch(state) + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + + +def get_empty_block() -> BeaconBlock: + """ + Get an empty ``BeaconBlock``. + """ + return BeaconBlock( + slot=GENESIS_SLOT, + previous_block_root=ZERO_HASH, + state_root=ZERO_HASH, + body=BeaconBlockBody( + randao_reveal=EMPTY_SIGNATURE, + eth1_data=Eth1Data( + deposit_root=ZERO_HASH, + deposit_count=0, + block_hash=ZERO_HASH, + ), + proposer_slashings=[], + attester_slashings=[], + attestations=[], + deposits=[], + voluntary_exits=[], + transfers=[], + ), + signature=EMPTY_SIGNATURE, + ) + + +def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], + genesis_time: int, + genesis_eth1_data: Eth1Data) -> BeaconState: + """ + Get the genesis ``BeaconState``. + """ + state = BeaconState( + # Misc + slot=GENESIS_SLOT, + genesis_time=genesis_time, + fork=Fork( + previous_version=GENESIS_FORK_VERSION, + current_version=GENESIS_FORK_VERSION, + epoch=GENESIS_EPOCH, + ), + + # Validator registry + validator_registry=[], + balances=[], + validator_registry_update_epoch=GENESIS_EPOCH, + + # Randomness and committees + latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), + latest_start_shard=GENESIS_START_SHARD, + + # Finality + previous_epoch_attestations=[], + current_epoch_attestations=[], + previous_justified_epoch=GENESIS_EPOCH - 1, + current_justified_epoch=GENESIS_EPOCH, + previous_justified_root=ZERO_HASH, + current_justified_root=ZERO_HASH, + justification_bitfield=0, + finalized_epoch=GENESIS_EPOCH, + finalized_root=ZERO_HASH, + + # Recent state + latest_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), + latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), + latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), + latest_slashed_balances=Vector([0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)]), + latest_block_header=get_temporary_block_header(get_empty_block()), + historical_roots=[], + + # Ethereum 1.0 chain data + latest_eth1_data=genesis_eth1_data, + eth1_data_votes=[], + deposit_index=0, + ) + + # Process genesis deposits + for deposit in genesis_validator_deposits: + process_deposit(state, deposit) + + # Process genesis activations + for validator_index, _ in enumerate(state.validator_registry): + if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: + activate_validator(state, validator_index, 
is_genesis=True) + + genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) + for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): + state.latest_active_index_roots[index] = genesis_active_index_root + + return state + + +def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: + """ + Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found. + """ + if block.slot == slot: + return block + elif block.slot < slot: + return None + else: + return get_ancestor(store, store.get_parent(block), slot) + + +def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock: + """ + Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``. + """ + validators = start_state.validator_registry + active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) + attestation_targets = [ + (validator_index, get_latest_attestation_target(store, validator_index)) + for validator_index in active_validator_indices + ] + + # Use the rounded-balance-with-hysteresis supplied by the protocol for fork + # choice voting. This reduces the number of recomputations that need to be + # made for optimized implementations that precompute and save data + def get_vote_count(block: BeaconBlock) -> int: + return sum( + start_state.validator_registry[validator_index].high_balance + for validator_index, target in attestation_targets + if get_ancestor(store, target, block.slot) == block + ) + + head = start_block + while 1: + children = get_children(store, head) + if len(children) == 0: + return head + head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) + + +def cache_state(state: BeaconState) -> None: + previous_slot_state_root = hash_tree_root(state) + + # store the previous slot's post state transition root + state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_slot_state_root + + # cache state root in stored latest_block_header if empty + if state.latest_block_header.state_root == ZERO_HASH: + state.latest_block_header.state_root = previous_slot_state_root + + # store latest known block for previous slot + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signed_root(state.latest_block_header) + + +def get_current_total_balance(state: BeaconState) -> Gwei: + return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_current_epoch(state))) + + +def get_previous_total_balance(state: BeaconState) -> Gwei: + return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_previous_epoch(state))) + + +def get_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: + output = set() + for a in attestations: + output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) + return sorted(list(output)) + + +def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: + return get_total_balance(state, get_attesting_indices(state, attestations)) + + +def get_current_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: + return [ + a for a in state.current_epoch_attestations + if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_current_epoch(state))) + ] + + +def get_previous_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: + return [ + a for a in 
state.previous_epoch_attestations + if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) + ] + + +def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[PendingAttestation]: + return [ + a for a in state.previous_epoch_attestations + if a.data.beacon_block_root == get_block_root(state, a.data.slot) + ] + + +def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: + all_attestations = state.current_epoch_attestations + state.previous_epoch_attestations + valid_attestations = [ + a for a in all_attestations if a.data.previous_crosslink == state.latest_crosslinks[shard] + ] + all_roots = [a.data.crosslink_data_root for a in valid_attestations] + + # handle when no attestations for shard available + if len(all_roots) == 0: + return ZERO_HASH, [] + + def get_attestations_for(root) -> List[PendingAttestation]: + return [a for a in valid_attestations if a.data.crosslink_data_root == root] + + # Winning crosslink root is the root with the most votes for it, ties broken in favor of + # lexicographically higher hash + winning_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r)), r)) + + return winning_root, get_attesting_indices(state, get_attestations_for(winning_root)) + + +def earliest_attestation(state: BeaconState, validator_index: ValidatorIndex) -> PendingAttestation: + return min([ + a for a in state.previous_epoch_attestations if + validator_index in get_attestation_participants(state, a.data, a.aggregation_bitfield) + ], key=lambda a: a.inclusion_slot) + + +def inclusion_slot(state: BeaconState, validator_index: ValidatorIndex) -> Slot: + return earliest_attestation(state, validator_index).inclusion_slot + + +def inclusion_distance(state: BeaconState, validator_index: ValidatorIndex) -> int: + attestation = earliest_attestation(state, validator_index) + return attestation.inclusion_slot - attestation.data.slot + + +def update_justification_and_finalization(state: BeaconState) -> None: + new_justified_epoch = state.current_justified_epoch + new_finalized_epoch = state.finalized_epoch + + # Rotate the justification bitfield up one epoch to make room for the current epoch + state.justification_bitfield <<= 1 + # If the previous epoch gets justified, fill the second last bit + previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) + if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: + new_justified_epoch = get_current_epoch(state) - 1 + state.justification_bitfield |= 2 + # If the current epoch gets justified, fill the last bit + current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) + if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: + new_justified_epoch = get_current_epoch(state) + state.justification_bitfield |= 1 + + # Process finalizations + bitfield = state.justification_bitfield + current_epoch = get_current_epoch(state) + # The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source + if (bitfield >> 1) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 3: + new_finalized_epoch = state.previous_justified_epoch + # The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source + if (bitfield >> 1) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 2: + 
new_finalized_epoch = state.previous_justified_epoch + # The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source + if (bitfield >> 0) % 8 == 0b111 and state.current_justified_epoch == current_epoch - 2: + new_finalized_epoch = state.current_justified_epoch + # The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source + if (bitfield >> 0) % 4 == 0b11 and state.current_justified_epoch == current_epoch - 1: + new_finalized_epoch = state.current_justified_epoch + + # Update state jusification/finality fields + state.previous_justified_epoch = state.current_justified_epoch + state.previous_justified_root = state.current_justified_root + if new_justified_epoch != state.current_justified_epoch: + state.current_justified_epoch = new_justified_epoch + state.current_justified_root = get_block_root(state, get_epoch_start_slot(new_justified_epoch)) + if new_finalized_epoch != state.finalized_epoch: + state.finalized_epoch = new_finalized_epoch + state.finalized_root = get_block_root(state, get_epoch_start_slot(new_finalized_epoch)) + + +def process_crosslinks(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) + previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) + next_epoch = current_epoch + 1 + for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): + for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): + winning_root, participants = get_winning_root_and_participants(state, shard) + participating_balance = get_total_balance(state, participants) + total_balance = get_total_balance(state, crosslink_committee) + if 3 * participating_balance >= 2 * total_balance: + state.latest_crosslinks[shard] = Crosslink( + epoch=min(slot_to_epoch(slot), state.latest_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), + crosslink_data_root=winning_root + ) + + +def maybe_reset_eth1_period(state: BeaconState) -> None: + if (get_current_epoch(state) + 1) % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: + for eth1_data_vote in state.eth1_data_votes: + # If a majority of all votes were for a particular eth1_data value, + # then set that as the new canonical value + if eth1_data_vote.vote_count * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: + state.latest_eth1_data = eth1_data_vote.eth1_data + state.eth1_data_votes = [] + + +def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: + if get_previous_total_balance(state) == 0: + return 0 + + adjusted_quotient = integer_squareroot(get_previous_total_balance(state)) // BASE_REWARD_QUOTIENT + return get_effective_balance(state, index) // adjusted_quotient // 5 + + +def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: + if epochs_since_finality <= 4: + extra_penalty = 0 + else: + extra_penalty = get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 + return get_base_reward(state, index) + extra_penalty + + +def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: + current_epoch = get_current_epoch(state) + epochs_since_finality = current_epoch + 1 - state.finalized_epoch + rewards = [0 for index in range(len(state.validator_registry))] + penalties = [0 for index in range(len(state.validator_registry))] + # Some helper variables + boundary_attestations = get_previous_epoch_boundary_attestations(state) + boundary_attesting_balance = get_attesting_balance(state, boundary_attestations) + 
total_balance = get_previous_total_balance(state) + total_attesting_balance = get_attesting_balance(state, state.previous_epoch_attestations) + matching_head_attestations = get_previous_epoch_matching_head_attestations(state) + matching_head_balance = get_attesting_balance(state, matching_head_attestations) + eligible_validators = [ + index for index, validator in enumerate(state.validator_registry) + if ( + is_active_validator(validator, current_epoch) or + (validator.slashed and current_epoch < validator.withdrawable_epoch) + ) + ] + # Process rewards or penalties for all validators + for index in eligible_validators: + base_reward = get_base_reward(state, index) + # Expected FFG source + if index in get_attesting_indices(state, state.previous_epoch_attestations): + rewards[index] += base_reward * total_attesting_balance // total_balance + # Inclusion speed bonus + rewards[index] += ( + base_reward * MIN_ATTESTATION_INCLUSION_DELAY // + inclusion_distance(state, index) + ) + else: + penalties[index] += base_reward + # Expected FFG target + if index in get_attesting_indices(state, boundary_attestations): + rewards[index] += base_reward * boundary_attesting_balance // total_balance + else: + penalties[index] += get_inactivity_penalty(state, index, epochs_since_finality) + # Expected head + if index in get_attesting_indices(state, matching_head_attestations): + rewards[index] += base_reward * matching_head_balance // total_balance + else: + penalties[index] += base_reward + # Proposer bonus + if index in get_attesting_indices(state, state.previous_epoch_attestations): + proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) + rewards[proposer_index] += base_reward // ATTESTATION_INCLUSION_REWARD_QUOTIENT + # Take away max rewards if we're not finalizing + if epochs_since_finality > 4: + penalties[index] += base_reward * 4 + return [rewards, penalties] + + +def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: + rewards = [0 for index in range(len(state.validator_registry))] + penalties = [0 for index in range(len(state.validator_registry))] + previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) + current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) + for slot in range(previous_epoch_start_slot, current_epoch_start_slot): + for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): + winning_root, participants = get_winning_root_and_participants(state, shard) + participating_balance = get_total_balance(state, participants) + total_balance = get_total_balance(state, crosslink_committee) + for index in crosslink_committee: + if index in participants: + rewards[index] += get_base_reward(state, index) * participating_balance // total_balance + else: + penalties[index] += get_base_reward(state, index) + return [rewards, penalties] + + +def apply_rewards(state: BeaconState) -> None: + rewards1, penalties1 = get_justification_and_finalization_deltas(state) + rewards2, penalties2 = get_crosslink_deltas(state) + for i in range(len(state.validator_registry)): + set_balance( + state, + i, + max( + 0, + get_balance(state, i) + rewards1[i] + rewards2[i] - penalties1[i] - penalties2[i], + ), + ) + + +def process_ejections(state: BeaconState) -> None: + """ + Iterate through the validator registry + and eject active validators with balance below ``EJECTION_BALANCE``. 
+ """ + for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): + if get_balance(state, index) < EJECTION_BALANCE: + initiate_validator_exit(state, index) + + +def update_validator_registry(state: BeaconState) -> None: + """ + Update validator registry. + Note that this function mutates ``state``. + """ + current_epoch = get_current_epoch(state) + # The active validators + active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) + # The total effective balance of active validators + total_balance = get_total_balance(state, active_validator_indices) + + # The maximum balance churn in Gwei (for deposits and exits separately) + max_balance_churn = max( + MAX_DEPOSIT_AMOUNT, + total_balance // (2 * MAX_BALANCE_CHURN_QUOTIENT) + ) + + # Activate validators within the allowable balance churn + balance_churn = 0 + for index, validator in enumerate(state.validator_registry): + if validator.activation_epoch == FAR_FUTURE_EPOCH and get_balance(state, index) >= MAX_DEPOSIT_AMOUNT: + # Check the balance churn would be within the allowance + balance_churn += get_effective_balance(state, index) + if balance_churn > max_balance_churn: + break + + # Activate validator + activate_validator(state, index, is_genesis=False) + + # Exit validators within the allowable balance churn + if current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: + balance_churn = ( + state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - + state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + ) + + for index, validator in enumerate(state.validator_registry): + if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: + # Check the balance churn would be within the allowance + balance_churn += get_effective_balance(state, index) + if balance_churn > max_balance_churn: + break + + # Exit validator + exit_validator(state, index) + + state.validator_registry_update_epoch = current_epoch + + +def update_registry(state: BeaconState) -> None: + # Check if we should update, and if so, update + if state.finalized_epoch > state.validator_registry_update_epoch: + update_validator_registry(state) + state.latest_start_shard = ( + state.latest_start_shard + + get_current_epoch_committee_count(state) + ) % SHARD_COUNT + + +def process_slashings(state: BeaconState) -> None: + """ + Process the slashings. + Note that this function mutates ``state``. + """ + current_epoch = get_current_epoch(state) + active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) + total_balance = get_total_balance(state, active_validator_indices) + + # Compute `total_penalties` + total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] + total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + total_penalties = total_at_end - total_at_start + + for index, validator in enumerate(state.validator_registry): + if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2: + penalty = max( + get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance, + get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT + ) + decrease_balance(state, index, penalty) + + +def process_exit_queue(state: BeaconState) -> None: + """ + Process the exit queue. + Note that this function mutates ``state``. 
+ """ + def eligible(index): + validator = state.validator_registry[index] + # Filter out dequeued validators + if validator.withdrawable_epoch != FAR_FUTURE_EPOCH: + return False + # Dequeue if the minimum amount of time has passed + else: + return get_current_epoch(state) >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + + eligible_indices = filter(eligible, list(range(len(state.validator_registry)))) + # Sort in order of exit epoch, and validators that exit within the same epoch exit in order of validator index + sorted_indices = sorted(eligible_indices, key=lambda index: state.validator_registry[index].exit_epoch) + for dequeues, index in enumerate(sorted_indices): + if dequeues >= MAX_EXIT_DEQUEUES_PER_EPOCH: + break + prepare_validator_for_withdrawal(state, index) + + +def finish_epoch_update(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) + next_epoch = current_epoch + 1 + # Set active index root + index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH + state.latest_active_index_roots[index_root_position] = hash_tree_root( + get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY) + ) + # Set total slashed balances + state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = ( + state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] + ) + # Set randao mix + state.latest_randao_mixes[next_epoch % LATEST_RANDAO_MIXES_LENGTH] = get_randao_mix(state, current_epoch) + # Set historical root accumulator + if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: + historical_batch = HistoricalBatch( + block_roots=state.latest_block_roots, + state_roots=state.latest_state_roots, + ) + state.historical_roots.append(hash_tree_root(historical_batch)) + # Rotate current/previous epoch attestations + state.previous_epoch_attestations = state.current_epoch_attestations + state.current_epoch_attestations = [] + + +def advance_slot(state: BeaconState) -> None: + state.slot += 1 + + +def process_block_header(state: BeaconState, block: BeaconBlock) -> None: + # Verify that the slots match + assert block.slot == state.slot + # Verify that the parent matches + assert block.previous_block_root == signed_root(state.latest_block_header) + # Save current block as the new latest block + state.latest_block_header = get_temporary_block_header(block) + # Verify proposer is not slashed + proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + assert not proposer.slashed + # Verify proposer signature + assert bls_verify( + pubkey=proposer.pubkey, + message_hash=signed_root(block), + signature=block.signature, + domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_BEACON_BLOCK) + ) + + +def process_randao(state: BeaconState, block: BeaconBlock) -> None: + proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + # Verify that the provided randao value is valid + assert bls_verify( + pubkey=proposer.pubkey, + message_hash=hash_tree_root(get_current_epoch(state)), + signature=block.body.randao_reveal, + domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO) + ) + # Mix it in + state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( + xor(get_randao_mix(state, get_current_epoch(state)), + hash(block.body.randao_reveal)) + ) + + +def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: + for eth1_data_vote in state.eth1_data_votes: + # If 
someone else has already voted for the same hash, add to its counter + if eth1_data_vote.eth1_data == block.body.eth1_data: + eth1_data_vote.vote_count += 1 + return + # If we're seeing this hash for the first time, make a new counter + state.eth1_data_votes.append(Eth1DataVote(eth1_data=block.body.eth1_data, vote_count=1)) + + +def process_proposer_slashing(state: BeaconState, + proposer_slashing: ProposerSlashing) -> None: + """ + Process ``ProposerSlashing`` transaction. + Note that this function mutates ``state``. + """ + proposer = state.validator_registry[proposer_slashing.proposer_index] + # Verify that the epoch is the same + assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) + # But the headers are different + assert proposer_slashing.header_1 != proposer_slashing.header_2 + # Check proposer is slashable + assert is_slashable_validator(proposer, get_current_epoch(state)) + # Signatures are valid + for header in (proposer_slashing.header_1, proposer_slashing.header_2): + assert bls_verify( + pubkey=proposer.pubkey, + message_hash=signed_root(header), + signature=header.signature, + domain=get_domain(state.fork, slot_to_epoch(header.slot), DOMAIN_BEACON_BLOCK) + ) + slash_validator(state, proposer_slashing.proposer_index) + + +def process_attester_slashing(state: BeaconState, + attester_slashing: AttesterSlashing) -> None: + """ + Process ``AttesterSlashing`` transaction. + Note that this function mutates ``state``. + """ + attestation1 = attester_slashing.slashable_attestation_1 + attestation2 = attester_slashing.slashable_attestation_2 + # Check that the attestations are conflicting + assert attestation1.data != attestation2.data + assert ( + is_double_vote(attestation1.data, attestation2.data) or + is_surround_vote(attestation1.data, attestation2.data) + ) + assert verify_slashable_attestation(state, attestation1) + assert verify_slashable_attestation(state, attestation2) + slashable_indices = [ + index for index in attestation1.validator_indices + if ( + index in attestation2.validator_indices and + is_slashable_validator(state.validator_registry[index], get_current_epoch(state)) + ) + ] + assert len(slashable_indices) >= 1 + for index in slashable_indices: + slash_validator(state, index) + + +def process_attestation(state: BeaconState, attestation: Attestation) -> None: + """ + Process ``Attestation`` transaction. + Note that this function mutates ``state``. 
+ """ + assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot + assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY + + # Check target epoch, source epoch, and source root + target_epoch = slot_to_epoch(attestation.data.slot) + assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), + (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), + } + + # Check crosslink data + assert attestation.data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] + assert state.latest_crosslinks[attestation.data.shard] in { + attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink + Crosslink( # Case 2: latest crosslink matches current crosslink + crosslink_data_root=attestation.data.crosslink_data_root, + epoch=min(slot_to_epoch(attestation.data.slot), + attestation.data.previous_crosslink.epoch + MAX_CROSSLINK_EPOCHS) + ), + } + + # Check custody bits [to be generalised in phase 1] + assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) + + # Check aggregate signature [to be generalised in phase 1] + participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + assert len(participants) != 0 + assert bls_verify( + pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), + message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), + signature=attestation.aggregate_signature, + domain=get_domain(state.fork, target_epoch, DOMAIN_ATTESTATION), + ) + + # Cache pending attestation + pending_attestation = PendingAttestation( + data=attestation.data, + aggregation_bitfield=attestation.aggregation_bitfield, + custody_bitfield=attestation.custody_bitfield, + inclusion_slot=state.slot + ) + if target_epoch == get_current_epoch(state): + state.current_epoch_attestations.append(pending_attestation) + else: + state.previous_epoch_attestations.append(pending_attestation) + + +def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: + """ + Process ``VoluntaryExit`` transaction. + Note that this function mutates ``state``. + """ + validator = state.validator_registry[exit.validator_index] + # Verify the validator is active + assert is_active_validator(validator, get_current_epoch(state)) + # Verify the validator has not yet exited + assert validator.exit_epoch == FAR_FUTURE_EPOCH + # Verify the validator has not initiated an exit + assert validator.initiated_exit is False + # Exits must specify an epoch when they become valid; they are not valid before then + assert get_current_epoch(state) >= exit.epoch + # Verify the validator has been active long enough + assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD + # Verify signature + assert bls_verify( + pubkey=validator.pubkey, + message_hash=signed_root(exit), + signature=exit.signature, + domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) + ) + # Initiate exit + initiate_validator_exit(state, exit.validator_index) + + +def process_transfer(state: BeaconState, transfer: Transfer) -> None: + """ + Process ``Transfer`` transaction. + Note that this function mutates ``state``. 
+ """ + # Verify the amount and fee aren't individually too big (for anti-overflow purposes) + assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) + # Verify that we have enough ETH to send, and that after the transfer the balance will be either + # exactly zero or at least MIN_DEPOSIT_AMOUNT + assert ( + get_balance(state, transfer.sender) == transfer.amount + transfer.fee or + get_balance(state, transfer.sender) >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT + ) + # A transfer is valid in only one slot + assert state.slot == transfer.slot + # Only withdrawn or not-yet-deposited accounts can transfer + assert ( + get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or + state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH + ) + # Verify that the pubkey is valid + assert ( + state.validator_registry[transfer.sender].withdrawal_credentials == + BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:] + ) + # Verify that the signature is valid + assert bls_verify( + pubkey=transfer.pubkey, + message_hash=signed_root(transfer), + signature=transfer.signature, + domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) + ) + # Process the transfer + decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) + increase_balance(state, transfer.recipient, transfer.amount) + increase_balance(state, get_beacon_proposer_index(state, state.slot), transfer.fee) + + +def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: + assert block.state_root == hash_tree_root(state) + +# Monkey patch validator get committee code +_compute_committee = compute_committee +committee_cache = {} + + +def compute_committee(validator_indices: List[ValidatorIndex], + seed: Bytes32, + index: int, + total_committees: int) -> List[ValidatorIndex]: + + param_hash = (hash_tree_root(validator_indices), seed, index, total_committees) + + if param_hash in committee_cache: + # print("Cache hit, epoch={0}".format(epoch)) + return committee_cache[param_hash] + else: + # print("Cache miss, epoch={0}".format(epoch)) + ret = _compute_committee(validator_indices, seed, index, total_committees) + committee_cache[param_hash] = ret + return ret + + +# Monkey patch hash cache +_hash = hash +hash_cache = {} + + +def hash(x): + if x in hash_cache: + return hash_cache[x] + else: + ret = _hash(x) + hash_cache[x] = ret + return ret + \ No newline at end of file diff --git a/test_libs/pyspec/phase0/state_transition.py b/test_libs/pyspec/eth2/phase0/state_transition.py similarity index 97% rename from test_libs/pyspec/phase0/state_transition.py rename to test_libs/pyspec/eth2/phase0/state_transition.py index cfd941c42..f2fb68f6b 100644 --- a/test_libs/pyspec/phase0/state_transition.py +++ b/test_libs/pyspec/eth2/phase0/state_transition.py @@ -1,12 +1,10 @@ from . 
import spec -from typing import ( # noqa: F401 +from typing import ( Any, Callable, - List, - NewType, - Tuple, + List ) from .spec import ( diff --git a/test_libs/pyspec/eth2/utils/__init__.py b/test_libs/pyspec/eth2/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/utils/bls_stub.py b/test_libs/pyspec/eth2/utils/bls_stub.py similarity index 100% rename from test_libs/pyspec/utils/bls_stub.py rename to test_libs/pyspec/eth2/utils/bls_stub.py diff --git a/test_libs/pyspec/utils/hash_function.py b/test_libs/pyspec/eth2/utils/hash_function.py similarity index 100% rename from test_libs/pyspec/utils/hash_function.py rename to test_libs/pyspec/eth2/utils/hash_function.py diff --git a/test_libs/pyspec/utils/merkle_minimal.py b/test_libs/pyspec/eth2/utils/merkle_minimal.py similarity index 100% rename from test_libs/pyspec/utils/merkle_minimal.py rename to test_libs/pyspec/eth2/utils/merkle_minimal.py diff --git a/test_libs/pyspec/utils/minimal_ssz.py b/test_libs/pyspec/eth2/utils/minimal_ssz.py similarity index 100% rename from test_libs/pyspec/utils/minimal_ssz.py rename to test_libs/pyspec/eth2/utils/minimal_ssz.py diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index 5d121a263..64ae482fd 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -3,7 +3,7 @@ from distutils.core import setup setup( name='pyspec', version='1.0', - packages=['debug', 'utils', 'phase0'], + packages=['eth2'], install_requires=[ "eth-utils>=1.3.0,<2", "eth-typing>=2.1.0,<3.0.0", From ec4d41e15da01e4da6626d41e11fe034a8f87476 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 02:30:47 +0800 Subject: [PATCH 124/481] fix config, work on py_tests --- .gitignore | 1 + Makefile | 2 +- py_tests/README.md | 23 +++++++++++++++++++ .../test_process_attestation.py | 9 ++++---- .../test_process_block_header.py | 4 ++-- .../block_processing/test_process_deposit.py | 6 ++--- .../test_process_proposer_slashing.py | 6 ++--- .../block_processing/test_voluntary_exit.py | 6 ++--- py_tests/phase0/conftest.py | 4 ++-- py_tests/phase0/helpers.py | 8 +++---- py_tests/phase0/test_sanity.py | 12 +++++----- py_tests/requirements.txt | 1 + test_generators/README.md | 2 +- 13 files changed, 54 insertions(+), 30 deletions(-) create mode 100644 py_tests/README.md diff --git a/.gitignore b/.gitignore index 816ecfa26..16576a634 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ build/ output/ yaml_tests/ +.pytest_cache diff --git a/Makefile b/Makefile index 3812a8255..84dbd0c26 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ define build_yaml_tests # Activate the venv, this is where dependencies are installed for the generator . $(GENERATOR_VENVS_DIR)$(1)bin/activate # Install all the necessary requirements - pip3 install -r $(GENERATOR_DIR)$(1)requirements.txt --user + pip3 install -r $(GENERATOR_DIR)$(1)requirements.txt # Run the generator. The generator is assumed to have an "main.py" file. # We output to the tests dir (generator program should accept a "-p " argument. diff --git a/py_tests/README.md b/py_tests/README.md new file mode 100644 index 000000000..659e737f1 --- /dev/null +++ b/py_tests/README.md @@ -0,0 +1,23 @@ +# ETH 2.0 py-tests + +These tests are not intended for client-consumption. +These tests are sanity tests, to verify if the spec itself is consistent. + +There are ideas to port these tests to the YAML test suite, + but we are still looking for inputs on how this should work. 
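To make the intent concrete, a sanity test in this directory is ordinary pytest code that drives the executable `eth2.phase0.spec` module and asserts on the resulting state. The sketch below is illustrative only (it is not one of the tests added in this patch), and it assumes a `genesis_state` fixture along the lines of the one built from `create_genesis_state` in `py_tests/phase0/conftest.py`:

```python
import eth2.phase0.spec as spec


def test_advance_slot_increments_slot(genesis_state):
    # `genesis_state` is an assumed fixture name; the real fixtures live in
    # py_tests/phase0/conftest.py and are built from create_genesis_state.
    state = genesis_state
    slot_before = state.slot
    spec.advance_slot(state)
    # advance_slot only bumps the slot counter, so this must hold by definition.
    assert state.slot == slot_before + 1
```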
+ +## How to run tests + +From within the py_tests folder: + +Install dependencies: +```bash +python3 -m venv venv +. py_tests/venv/bin/activate +pip3 install -r requirements.txt +``` + +Run the tests: +``` +pytest -m minimal_config . +``` diff --git a/py_tests/phase0/block_processing/test_process_attestation.py b/py_tests/phase0/block_processing/test_process_attestation.py index 08cab11ff..f76ca77d8 100644 --- a/py_tests/phase0/block_processing/test_process_attestation.py +++ b/py_tests/phase0/block_processing/test_process_attestation.py @@ -1,18 +1,17 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec +import eth2.phase0.spec as spec -from build.phase0.state_transition import ( +from eth2.phase0.state_transition import ( state_transition, ) -from build.phase0.spec import ( - ZERO_HASH, +from eth2.phase0.spec import ( get_current_epoch, process_attestation, slot_to_epoch, ) -from tests.phase0.helpers import ( +from ..helpers import ( build_empty_block_for_next_slot, get_valid_attestation, ) diff --git a/py_tests/phase0/block_processing/test_process_block_header.py b/py_tests/phase0/block_processing/test_process_block_header.py index 4981b656c..ef902bb72 100644 --- a/py_tests/phase0/block_processing/test_process_block_header.py +++ b/py_tests/phase0/block_processing/test_process_block_header.py @@ -2,13 +2,13 @@ from copy import deepcopy import pytest -from build.phase0.spec import ( +from eth2.phase0.spec import ( get_beacon_proposer_index, cache_state, advance_slot, process_block_header, ) -from tests.phase0.helpers import ( +from ..helpers import ( build_empty_block_for_next_slot, ) diff --git a/py_tests/phase0/block_processing/test_process_deposit.py b/py_tests/phase0/block_processing/test_process_deposit.py index 0726dddef..19467984e 100644 --- a/py_tests/phase0/block_processing/test_process_deposit.py +++ b/py_tests/phase0/block_processing/test_process_deposit.py @@ -1,14 +1,14 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec +import eth2.phase0.spec as spec -from build.phase0.spec import ( +from eth2.phase0.spec import ( get_balance, ZERO_HASH, process_deposit, ) -from tests.phase0.helpers import ( +from ..helpers import ( build_deposit, privkeys, pubkeys, diff --git a/py_tests/phase0/block_processing/test_process_proposer_slashing.py b/py_tests/phase0/block_processing/test_process_proposer_slashing.py index 467d2164b..761624c25 100644 --- a/py_tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/py_tests/phase0/block_processing/test_process_proposer_slashing.py @@ -1,13 +1,13 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec -from build.phase0.spec import ( +import eth2.phase0.spec as spec +from eth2.phase0.spec import ( get_balance, get_current_epoch, process_proposer_slashing, ) -from tests.phase0.helpers import ( +from ..helpers import ( get_valid_proposer_slashing, ) diff --git a/py_tests/phase0/block_processing/test_voluntary_exit.py b/py_tests/phase0/block_processing/test_voluntary_exit.py index 6adc81464..45a8af1bb 100644 --- a/py_tests/phase0/block_processing/test_voluntary_exit.py +++ b/py_tests/phase0/block_processing/test_voluntary_exit.py @@ -1,14 +1,14 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec +import eth2.phase0.spec as spec -from build.phase0.spec import ( +from eth2.phase0.spec import ( get_active_validator_indices, get_current_epoch, process_voluntary_exit, ) -from tests.phase0.helpers import ( +from ..helpers import ( 
build_voluntary_exit, pubkey_to_privkey, ) diff --git a/py_tests/phase0/conftest.py b/py_tests/phase0/conftest.py index 36a087941..0def66ad6 100644 --- a/py_tests/phase0/conftest.py +++ b/py_tests/phase0/conftest.py @@ -1,8 +1,8 @@ import pytest -from build.phase0 import spec +from eth2.phase0 import spec -from tests.phase0.helpers import ( +from .helpers import ( create_genesis_state, ) diff --git a/py_tests/phase0/helpers.py b/py_tests/phase0/helpers.py index d7f4ae6e8..3be231ee2 100644 --- a/py_tests/phase0/helpers.py +++ b/py_tests/phase0/helpers.py @@ -2,9 +2,9 @@ from copy import deepcopy from py_ecc import bls -import build.phase0.spec as spec -from build.phase0.utils.minimal_ssz import signed_root -from build.phase0.spec import ( +import eth2.phase0.spec as spec +from eth2.utils.minimal_ssz import signed_root +from eth2.phase0.spec import ( # constants EMPTY_SIGNATURE, ZERO_HASH, @@ -31,7 +31,7 @@ from build.phase0.spec import ( verify_merkle_branch, hash, ) -from build.phase0.utils.merkle_minimal import ( +from eth2.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, diff --git a/py_tests/phase0/test_sanity.py b/py_tests/phase0/test_sanity.py index 3b4497ca5..0af65ad51 100644 --- a/py_tests/phase0/test_sanity.py +++ b/py_tests/phase0/test_sanity.py @@ -3,10 +3,10 @@ from copy import deepcopy import pytest from py_ecc import bls -import build.phase0.spec as spec +import eth2.phase0.spec as spec -from build.phase0.utils.minimal_ssz import signed_root -from build.phase0.spec import ( +from eth2.utils.minimal_ssz import signed_root +from eth2.phase0.spec import ( # constants EMPTY_SIGNATURE, ZERO_HASH, @@ -27,15 +27,15 @@ from build.phase0.spec import ( verify_merkle_branch, hash, ) -from build.phase0.state_transition import ( +from eth2.phase0.state_transition import ( state_transition, ) -from build.phase0.utils.merkle_minimal import ( +from eth2.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, ) -from tests.phase0.helpers import ( +from .helpers import ( build_deposit_data, build_empty_block_for_next_slot, force_registry_change_at_next_epoch, diff --git a/py_tests/requirements.txt b/py_tests/requirements.txt index 9145e951e..d18b29127 100644 --- a/py_tests/requirements.txt +++ b/py_tests/requirements.txt @@ -4,3 +4,4 @@ oyaml==0.7 pycryptodome==3.7.3 py_ecc>=1.6.0 pytest>=3.6,<3.7 +../pyspec diff --git a/test_generators/README.md b/test_generators/README.md index 2d6160c03..61dc96063 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -62,7 +62,7 @@ eth-utils==1.4.1 Install all the necessary requirements (re-run when you add more): ```bash -pip3 install -r requirements.txt --user +pip3 install -r requirements.txt ``` And write your initial test generator, extending the base generator: From b5bf56376bdd2e5e0034965b45c192169575440f Mon Sep 17 00:00:00 2001 From: William M Peaster Date: Wed, 27 Mar 2019 14:00:28 -0500 Subject: [PATCH 125/481] Minor copyediting corrections to 0_beacon-chain.md A handful of minor editing changes made to non-code text for the purposes of improved clarity, consistency, and accuracy. 
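For reference, the import changes in the preceding patch all follow one pattern: test modules and helpers now import the executable spec from the installed `eth2` package rather than the old `build.*`/`tests.*` paths. A representative header for a `py_tests/phase0` module after that refactor (assembled from the diffs above, shown only for illustration) looks like:

```python
# Representative imports after the build.* -> eth2.* refactor; taken from the
# updated test modules above, not an additional file in this patch series.
import eth2.phase0.spec as spec
from eth2.phase0.state_transition import state_transition
from eth2.utils.minimal_ssz import signed_root
from eth2.utils.merkle_minimal import calc_merkle_tree_from_leaves
from .helpers import build_empty_block_for_next_slot  # block_processing tests use `..helpers`
```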
--- specs/core/0_beacon-chain.md | 40 ++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2c0bc2554..92acdd70d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 0 -- The Beacon Chain -**NOTICE**: This document is a work-in-progress for researchers and implementers. It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](#ref-python-poc). +**NOTICE**: This document is a work in progress for researchers and implementers. It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](#ref-python-poc). ## Table of contents @@ -149,9 +149,9 @@ This document represents the specification for Phase 0 of Ethereum 2.0 -- The Beacon Chain. -At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of [validators](#dfn-validator). In the initial deployment phases of Ethereum 2.0 the only mechanism to become a [validator](#dfn-validator) is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a [validator](#dfn-validator) happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and after a queuing process. Exit is either voluntary or done forcibly as a penalty for misbehavior. +At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of [validators](#dfn-validator). In the initial deployment phases of Ethereum 2.0, the only mechanism to become a [validator](#dfn-validator) is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a [validator](#dfn-validator) happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior. -The primary source of load on the beacon chain is "attestations". Attestations are availability votes for a shard block, and simultaneously proof of stake votes for a beacon block. A sufficient number of attestations for the same shard block create a "crosslink", confirming the shard segment up to that shard block into the beacon chain. Crosslinks also serve as infrastructure for asynchronous cross-shard communication. +The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block and proof-of-stake votes for a beacon block. A sufficient number of attestations for the same shard block create a "crosslink", confirming the shard segment up to that shard block into the beacon chain. Crosslinks also serve as infrastructure for asynchronous cross-shard communication. ## Notation @@ -159,20 +159,20 @@ Code snippets appearing in `this style` are to be interpreted as Python code. ## Terminology -* **Validator** - a registered participant in the beacon chain. You can become one by sending Ether into the Ethereum 1.0 deposit contract. +* **Validator** - a registered participant in the beacon chain. You can become one by sending ether into the Ethereum 1.0 deposit contract. * **Active validator** - an active participant in the Ethereum 2.0 consensus invited to, among other things, propose and attest to blocks and vote for crosslinks. 
* **Committee** - a (pseudo-) randomly sampled subset of [active validators](#dfn-active-validator). When a committee is referred to collectively, as in "this committee attests to X", this is assumed to mean "some subset of that committee that contains enough [validators](#dfn-validator) that the protocol recognizes it as representing the committee". -* **Proposer** - the [validator](#dfn-validator) that creates a beacon chain block +* **Proposer** - the [validator](#dfn-validator) that creates a beacon chain block. * **Attester** - a [validator](#dfn-validator) that is part of a committee that needs to sign off on a beacon chain block while simultaneously creating a link (crosslink) to a recent shard block on a particular shard chain. * **Beacon chain** - the central PoS chain that is the base of the sharding system. * **Shard chain** - one of the chains on which user transactions take place and account data is stored. * **Block root** - a 32-byte Merkle root of a beacon chain block or shard chain block. Previously called "block hash". -* **Crosslink** - a set of signatures from a committee attesting to a block in a shard chain, which can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. -* **Slot** - a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations -* **Epoch** - an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation -* **Finalized**, **justified** - see Casper FFG finalization [[casper-ffg]](#ref-casper-ffg) -* **Withdrawal period** - the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable -* **Genesis time** - the Unix time of the genesis beacon chain block at slot 0 +* **Crosslink** - a set of signatures from a committee attesting to a block in a shard chain that can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. +* **Slot** - a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations. +* **Epoch** - an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation. +* **Finalized**, **justified** - see Casper FFG finalization [[casper-ffg]](#ref-casper-ffg). +* **Withdrawal period** - the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable. +* **Genesis time** - the Unix time of the genesis beacon chain block at slot 0. ## Constants @@ -871,7 +871,7 @@ def compute_committee(validator_indices: List[ValidatorIndex], ] ``` -**Note**: this definition and the next few definitions are highly inefficient as algorithms as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. +**Note**: this definition and the next few definitions are highly inefficient as algorithms, as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. 
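To make that note concrete, one purely illustrative, non-normative way an implementation or test harness might memoize `compute_committee` is sketched below; it mirrors the parameter-keyed cache that is monkey-patched into the executable spec elsewhere in this patch series:

```python
# Non-normative memoization sketch. `compute_committee`, `hash_tree_root`,
# `ValidatorIndex` and `Bytes32` are the definitions from this spec; only the
# cache wrapper itself is new here.
committee_cache = {}


def cached_compute_committee(validator_indices: List[ValidatorIndex],
                             seed: Bytes32,
                             index: int,
                             total_committees: int) -> List[ValidatorIndex]:
    key = (hash_tree_root(validator_indices), seed, index, total_committees)
    if key not in committee_cache:
        committee_cache[key] = compute_committee(validator_indices, seed, index, total_committees)
    return committee_cache[key]
```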
### `get_current_epoch_committee_count` @@ -1426,7 +1426,7 @@ Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSI ### `Eth2Genesis` log -When sufficiently many full deposits have been made the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where: +When a sufficient amount of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where: * `genesis_time` equals `time` in the `Eth2Genesis` log * `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log @@ -1557,13 +1557,13 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], ## Beacon chain processing -The beacon chain is the system chain for Ethereum 2.0. The main responsibilities of the beacon chain are: +The beacon chain is the system chain for Ethereum 2.0. The main responsibilities of the beacon chain are as follows: * Store and maintain the registry of [validators](#dfn-validator) * Process crosslinks (see above) * Process its per-block consensus, as well as the finality gadget -Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks, and maintain a view of what is the current "canonical chain", terminating at the current "head". However, because of the beacon chain's relationship with Ethereum 1.0, and because it is a proof-of-stake chain, there are differences. +Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of what is the current "canonical chain", terminating at the current "head". However, because of the beacon chain's relationship with Ethereum 1.0, and because it is a proof-of-stake chain, there are differences. For a beacon chain block, `block`, to be processed by a node, the following conditions must be met: @@ -1573,7 +1573,7 @@ For a beacon chain block, `block`, to be processed by a node, the following cond If these conditions are not met, the client should delay processing the beacon block until the conditions are all satisfied. -Beacon block production is significantly different because of the proof of stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block, and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. +Beacon block production is significantly different because of the proof-of-stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this dynamic requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. ### Beacon chain fork choice rule @@ -1635,7 +1635,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) ## Beacon chain state transition function -We now define the state transition function. At a high level the state transition is made up of four parts: +We now define the state transition function. 
At a high level, the state transition is made up of four parts: 1. State caching, which happens at the start of every slot. 2. The per-epoch transitions, which happens at the start of the first slot of every epoch. @@ -1643,7 +1643,7 @@ We now define the state transition function. At a high level the state transitio 4. The per-block transitions, which happens at every block. Transition section notes: -* The state caching, caches the state root of the previous slot. +* The state caching caches the state root of the previous slot. * The per-epoch transitions focus on the [validator](#dfn-validator) registry, including adjusting balances and activating and exiting [validators](#dfn-validator), as well as processing crosslinks and managing block justification/finalization. * The per-slot transitions focus on the slot counter and block roots records updates. * The per-block transitions generally focus on verifying aggregate signatures and saving temporary records relating to the per-block activity in the `BeaconState`. @@ -1876,7 +1876,7 @@ def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_sin return get_base_reward(state, index) + extra_penalty ``` -Note: When applying penalties in the following balance recalculations implementers should make sure the `uint64` does not underflow. +Note: When applying penalties in the following balance recalculations, implementers should make sure the `uint64` does not underflow. ##### Justification and finalization @@ -2430,7 +2430,7 @@ def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: # References -This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely that, information. An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. +This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely that, informative. An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. ## Normative From 458eb9913364792fef95d78609ffb83fe01cd83e Mon Sep 17 00:00:00 2001 From: William M Peaster Date: Wed, 27 Mar 2019 14:15:50 -0500 Subject: [PATCH 126/481] Minor copyedits to 0_beacon-chain.md Approximately a dozen minor copyediting fixes of non-code text for improved clarity, consistency, and accuracy. --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 92acdd70d..cf6527ad1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2430,7 +2430,7 @@ def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: # References -This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely that, informative. 
An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. +This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely helpful information. An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. ## Normative From 1aaa0030fc90c4e8557ea2c7bb08d4c86d4085bd Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 28 Mar 2019 08:58:18 -0500 Subject: [PATCH 127/481] Withdrawal queue -> exit queue --- specs/core/0_beacon-chain.md | 181 +++++++++++------------------------ 1 file changed, 55 insertions(+), 126 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2c0bc2554..14b4f566a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -98,9 +98,7 @@ - [Routines for updating validator status](#routines-for-updating-validator-status) - [`activate_validator`](#activate_validator) - [`initiate_validator_exit`](#initiate_validator_exit) - - [`exit_validator`](#exit_validator) - [`slash_validator`](#slash_validator) - - [`prepare_validator_for_withdrawal`](#prepare_validator_for_withdrawal) - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) - [Deposit arguments](#deposit-arguments) - [Withdrawal credentials](#withdrawal-credentials) @@ -121,8 +119,8 @@ - [Justification and finalization](#justification-and-finalization) - [Crosslinks](#crosslinks-1) - [Apply rewards](#apply-rewards) - - [Ejections](#ejections) - - [Validator registry and shuffling seed data](#validator-registry-and-shuffling-seed-data) + - [Balance-driven status transitions](#balance-driven-status-transitions) + - [Validator registry and start shard](#validator-registry-and-start-shard) - [Slashings and exit queue](#slashings-and-exit-queue) - [Final updates](#final-updates) - [Per-slot processing](#per-slot-processing) @@ -182,7 +180,6 @@ Code snippets appearing in `this style` are to be interpreted as Python code. 
| - | - | | `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | -| `MAX_BALANCE_CHURN_QUOTIENT` | `2**5` (= 32) | | `MAX_SLASHABLE_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | | `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | @@ -418,14 +415,14 @@ The types are defined topologically to aid in facilitating an executable version 'pubkey': 'bytes48', # Withdrawal credentials 'withdrawal_credentials': 'bytes32', + # Epoch when became eligible for activation + 'activation_eligibility_epoch': 'uint64', # Epoch when validator activated 'activation_epoch': 'uint64', # Epoch when validator exited 'exit_epoch': 'uint64', # Epoch when validator is eligible to withdraw 'withdrawable_epoch': 'uint64', - # Did the validator initiate an exit - 'initiated_exit': 'bool', # Was the validator slashed 'slashed': 'bool', # Rounded balance @@ -596,6 +593,10 @@ The types are defined topologically to aid in facilitating an executable version # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], 'latest_start_shard': 'uint64', + + # Exit queue + 'exit_epoch': 'uint64', + 'exit_queue_filled': 'uint64' # Finality 'previous_epoch_attestations': [PendingAttestation], @@ -1352,22 +1353,20 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: Note that this function mutates ``state``. """ validator = state.validator_registry[index] - validator.initiated_exit = True -``` - -#### `exit_validator` - -```python -def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: - """ - Exit the validator with the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - - # Update validator exit epoch if not previously exited + # Operation is a no-op if validator is already in the queue if validator.exit_epoch == FAR_FUTURE_EPOCH: - validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + # Update exit queue counters + if state.exit_epoch < get_delayed_activation_exit_epoch(get_current_epoch(state)): + state.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + if state.exit_queue_filled >= MAX_EXIT_DEQUEUES_PER_EPOCH: + state.exit_epoch += 1 + state.exit_queue_filled = 0 + # Set validator exit epoch and withdrawable epoch + if validator.exit_epoch > state.exit_epoch: + validator.exit_epoch = state.exit_epoch + validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + # Extend queue + state.exit_queue_filled += 1 ``` #### `slash_validator` @@ -1379,7 +1378,7 @@ def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: Note that this function mutates ``state``. """ validator = state.validator_registry[index] - exit_validator(state, index) + initiate_validator_exit(state, index) state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) whistleblower_index = get_beacon_proposer_index(state, state.slot) @@ -1390,19 +1389,6 @@ def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH ``` -#### `prepare_validator_for_withdrawal` - -```python -def prepare_validator_for_withdrawal(state: BeaconState, index: ValidatorIndex) -> None: - """ - Set the validator with the given ``index`` as withdrawable - ``MIN_VALIDATOR_WITHDRAWABILITY_DELAY`` after the current epoch. 
- Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - validator.withdrawable_epoch = get_current_epoch(state) + MIN_VALIDATOR_WITHDRAWABILITY_DELAY -``` - ## Ethereum 1.0 deposit contract The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in phase 2, i.e. when the EVM2.0 is deployed and the shards have state. @@ -1512,6 +1498,10 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), latest_start_shard=GENESIS_START_SHARD, + + # Exit queue + exit_epoch=GENESIS_EPOCH, + exit_queue_filled=0, # Finality previous_epoch_attestations=[], @@ -1690,16 +1680,16 @@ def get_previous_total_balance(state: BeaconState) -> Gwei: ``` ```python -def get_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: +def get_unslashed_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: output = set() for a in attestations: output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) - return sorted(list(output)) + return sorted(filter(lambda index: not state.validator_registry[index].is_slashed, list(output))) ``` ```python def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: - return get_total_balance(state, get_attesting_indices(state, attestations)) + return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) ``` ```python @@ -1747,7 +1737,7 @@ def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple # lexicographically higher hash winning_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r)), r)) - return winning_root, get_attesting_indices(state, get_attestations_for(winning_root)) + return winning_root, get_unslashed_attesting_indices(state, get_attestations_for(winning_root)) ``` ```python @@ -1904,7 +1894,7 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ for index in eligible_validators: base_reward = get_base_reward(state, index) # Expected FFG source - if index in get_attesting_indices(state, state.previous_epoch_attestations): + if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): rewards[index] += base_reward * total_attesting_balance // total_balance # Inclusion speed bonus rewards[index] += ( @@ -1914,17 +1904,17 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ else: penalties[index] += base_reward # Expected FFG target - if index in get_attesting_indices(state, boundary_attestations): + if index in get_unslashed_attesting_indices(state, boundary_attestations): rewards[index] += base_reward * boundary_attesting_balance // total_balance else: penalties[index] += get_inactivity_penalty(state, index, epochs_since_finality) # Expected head - if index in get_attesting_indices(state, matching_head_attestations): + if index in get_unslashed_attesting_indices(state, matching_head_attestations): rewards[index] += base_reward * matching_head_balance // total_balance else: penalties[index] += base_reward # Proposer bonus - if index in 
get_attesting_indices(state, state.previous_epoch_attestations): + if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) rewards[proposer_index] += base_reward // ATTESTATION_INCLUSION_REWARD_QUOTIENT # Take away max rewards if we're not finalizing @@ -1973,72 +1963,24 @@ def apply_rewards(state: BeaconState) -> None: ) ``` -#### Ejections +#### Balance-driven status transitions -Run `process_ejections(state)`. +Run `process_balance_driven_status_transitions(state)`. ```python def process_ejections(state: BeaconState) -> None: """ Iterate through the validator registry - and eject active validators with balance below ``EJECTION_BALANCE``. + and deposit or eject active validators with sufficiently high or low balances """ - for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): - if get_balance(state, index) < EJECTION_BALANCE: + for index, validator in enumeratE(state.validator_registry): + if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: + state.activation_eligibility_epoch = get_current_epoch(state) + if is_active(validator, get_current_epoch(state)) and get_balance(state, index) < EJECTION_BALANCE: initiate_validator_exit(state, index) ``` -#### Validator registry and shuffling seed data - -```python -def update_validator_registry(state: BeaconState) -> None: - """ - Update validator registry. - Note that this function mutates ``state``. - """ - current_epoch = get_current_epoch(state) - # The active validators - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) - # The total effective balance of active validators - total_balance = get_total_balance(state, active_validator_indices) - - # The maximum balance churn in Gwei (for deposits and exits separately) - max_balance_churn = max( - MAX_DEPOSIT_AMOUNT, - total_balance // (2 * MAX_BALANCE_CHURN_QUOTIENT) - ) - - # Activate validators within the allowable balance churn - balance_churn = 0 - for index, validator in enumerate(state.validator_registry): - if validator.activation_epoch == FAR_FUTURE_EPOCH and get_balance(state, index) >= MAX_DEPOSIT_AMOUNT: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Activate validator - activate_validator(state, index, is_genesis=False) - - # Exit validators within the allowable balance churn - if current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: - balance_churn = ( - state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - - state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - ) - - for index, validator in enumerate(state.validator_registry): - if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Exit validator - exit_validator(state, index) - - state.validator_registry_update_epoch = current_epoch -``` +#### Validator registry and start shard Run the following function: @@ -2046,7 +1988,18 @@ Run the following function: def update_registry(state: BeaconState) -> None: # Check if we should update, and if so, update if state.finalized_epoch > 
state.validator_registry_update_epoch: - update_validator_registry(state) + # Validator indices that could be activated + indices_for_activation = sorted( + filter( + lambda index: state.validator_registry[index].activation_epoch == FAR_FUTURE_EPOCH + get_active_validator_indices(state.validator_registry, current_epoch), + ), + key=lambda index: state.validator_registry[index].activation_eligibility_epoch + ) + for index in indices_for_activation[:MAX_EXIT_DEQUEUES_PER_EPOCH]: + activate_validator(state, index, is_genesis=False) + + state.validator_registry_update_epoch = current_epoch state.latest_start_shard = ( state.latest_start_shard + get_current_epoch_committee_count(state) @@ -2057,7 +2010,7 @@ def update_registry(state: BeaconState) -> None: #### Slashings and exit queue -Run `process_slashings(state)` and `process_exit_queue(state)`: +Run `process_slashings(state)`: ```python def process_slashings(state: BeaconState) -> None: @@ -2083,30 +2036,6 @@ def process_slashings(state: BeaconState) -> None: decrease_balance(state, index, penalty) ``` -```python -def process_exit_queue(state: BeaconState) -> None: - """ - Process the exit queue. - Note that this function mutates ``state``. - """ - def eligible(index): - validator = state.validator_registry[index] - # Filter out dequeued validators - if validator.withdrawable_epoch != FAR_FUTURE_EPOCH: - return False - # Dequeue if the minimum amount of time has passed - else: - return get_current_epoch(state) >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY - - eligible_indices = filter(eligible, list(range(len(state.validator_registry)))) - # Sort in order of exit epoch, and validators that exit within the same epoch exit in order of validator index - sorted_indices = sorted(eligible_indices, key=lambda index: state.validator_registry[index].exit_epoch) - for dequeues, index in enumerate(sorted_indices): - if dequeues >= MAX_EXIT_DEQUEUES_PER_EPOCH: - break - prepare_validator_for_withdrawal(state, index) -``` - #### Final updates Run the following function: From 3f6d6535ada35fd648f7ed6fad084c080f8af037 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 22:04:51 +0800 Subject: [PATCH 128/481] Fix pyspec package structure --- .gitignore | 3 + test_libs/pyspec/eth2/__init__.py | 0 test_libs/pyspec/eth2/phase0/spec.py | 1692 -------------------------- test_libs/pyspec/setup.py | 2 +- 4 files changed, 4 insertions(+), 1693 deletions(-) delete mode 100644 test_libs/pyspec/eth2/__init__.py delete mode 100644 test_libs/pyspec/eth2/phase0/spec.py diff --git a/.gitignore b/.gitignore index 16576a634..b12e536ff 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,6 @@ output/ yaml_tests/ .pytest_cache + +# Dynamically built from Markdown spec +test_libs/pyspec/eth2/phase0/spec.py diff --git a/test_libs/pyspec/eth2/__init__.py b/test_libs/pyspec/eth2/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/test_libs/pyspec/eth2/phase0/spec.py b/test_libs/pyspec/eth2/phase0/spec.py deleted file mode 100644 index 32dc8e743..000000000 --- a/test_libs/pyspec/eth2/phase0/spec.py +++ /dev/null @@ -1,1692 +0,0 @@ -from eth2.utils.minimal_ssz import * -from eth2.utils.bls_stub import * -def int_to_bytes1(x): return x.to_bytes(1, 'little') -def int_to_bytes2(x): return x.to_bytes(2, 'little') -def int_to_bytes3(x): return x.to_bytes(3, 'little') -def int_to_bytes4(x): return x.to_bytes(4, 'little') -def int_to_bytes8(x): return x.to_bytes(8, 'little') -def int_to_bytes32(x): return x.to_bytes(32, 'little') 
-def int_to_bytes48(x): return x.to_bytes(48, 'little') -def int_to_bytes96(x): return x.to_bytes(96, 'little') - - -SLOTS_PER_EPOCH = 64 -def slot_to_epoch(x): return x // SLOTS_PER_EPOCH - - -from typing import ( - Any, - Callable, - List, - NewType, - Tuple, -) - - -Slot = NewType('Slot', int) # uint64 -Epoch = NewType('Epoch', int) # uint64 -Shard = NewType('Shard', int) # uint64 -ValidatorIndex = NewType('ValidatorIndex', int) # uint64 -Gwei = NewType('Gwei', int) # uint64 -Bytes32 = NewType('Bytes32', bytes) # bytes32 -BLSPubkey = NewType('BLSPubkey', bytes) # bytes48 -BLSSignature = NewType('BLSSignature', bytes) # bytes96 -Any = None -Store = None - -SHARD_COUNT = 2**10 -TARGET_COMMITTEE_SIZE = 2**7 -MAX_BALANCE_CHURN_QUOTIENT = 2**5 -MAX_SLASHABLE_ATTESTATION_PARTICIPANTS = 2**12 -MAX_EXIT_DEQUEUES_PER_EPOCH = 2**2 -SHUFFLE_ROUND_COUNT = 90 -DEPOSIT_CONTRACT_ADDRESS = 0x1234567890123567890123456789012357890 -DEPOSIT_CONTRACT_TREE_DEPTH = 2**5 -MIN_DEPOSIT_AMOUNT = 2**0 * 10**9 -MAX_DEPOSIT_AMOUNT = 2**5 * 10**9 -EJECTION_BALANCE = 2**4 * 10**9 -HIGH_BALANCE_INCREMENT = 2**0 * 10**9 -GENESIS_FORK_VERSION = int_to_bytes4(0) -GENESIS_SLOT = 2**32 -GENESIS_EPOCH = slot_to_epoch(GENESIS_SLOT) -GENESIS_START_SHARD = 0 -FAR_FUTURE_EPOCH = 2**64 - 1 -ZERO_HASH = int_to_bytes32(0) -EMPTY_SIGNATURE = int_to_bytes96(0) -BLS_WITHDRAWAL_PREFIX_BYTE = int_to_bytes1(0) -SECONDS_PER_SLOT = 6 -MIN_ATTESTATION_INCLUSION_DELAY = 2**2 -SLOTS_PER_EPOCH = 2**6 -MIN_SEED_LOOKAHEAD = 2**0 -ACTIVATION_EXIT_DELAY = 2**2 -EPOCHS_PER_ETH1_VOTING_PERIOD = 2**4 -SLOTS_PER_HISTORICAL_ROOT = 2**13 -MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8 -PERSISTENT_COMMITTEE_PERIOD = 2**11 -MAX_CROSSLINK_EPOCHS = 2**6 -LATEST_RANDAO_MIXES_LENGTH = 2**13 -LATEST_ACTIVE_INDEX_ROOTS_LENGTH = 2**13 -LATEST_SLASHED_EXIT_LENGTH = 2**13 -BASE_REWARD_QUOTIENT = 2**5 -WHISTLEBLOWER_REWARD_QUOTIENT = 2**9 -ATTESTATION_INCLUSION_REWARD_QUOTIENT = 2**3 -INACTIVITY_PENALTY_QUOTIENT = 2**24 -MIN_PENALTY_QUOTIENT = 2**5 -MAX_PROPOSER_SLASHINGS = 2**4 -MAX_ATTESTER_SLASHINGS = 2**0 -MAX_ATTESTATIONS = 2**7 -MAX_DEPOSITS = 2**4 -MAX_VOLUNTARY_EXITS = 2**4 -MAX_TRANSFERS = 2**4 -DOMAIN_BEACON_BLOCK = 0 -DOMAIN_RANDAO = 1 -DOMAIN_ATTESTATION = 2 -DOMAIN_DEPOSIT = 3 -DOMAIN_VOLUNTARY_EXIT = 4 -DOMAIN_TRANSFER = 5 -Fork = SSZType({ - # Previous fork version - 'previous_version': 'bytes4', - # Current fork version - 'current_version': 'bytes4', - # Fork epoch number - 'epoch': 'uint64', -}) -Crosslink = SSZType({ - # Epoch number - 'epoch': 'uint64', - # Shard data since the previous crosslink - 'crosslink_data_root': 'bytes32', -}) -Eth1Data = SSZType({ - # Root of the deposit tree - 'deposit_root': 'bytes32', - # Total number of deposits - 'deposit_count': 'uint64', - # Block hash - 'block_hash': 'bytes32', -}) -Eth1DataVote = SSZType({ - # Data being voted for - 'eth1_data': Eth1Data, - # Vote count - 'vote_count': 'uint64', -}) -AttestationData = SSZType({ - # LMD GHOST vote - 'slot': 'uint64', - 'beacon_block_root': 'bytes32', - - # FFG vote - 'source_epoch': 'uint64', - 'source_root': 'bytes32', - 'target_root': 'bytes32', - - # Crosslink vote - 'shard': 'uint64', - 'previous_crosslink': Crosslink, - 'crosslink_data_root': 'bytes32', -}) -AttestationDataAndCustodyBit = SSZType({ - # Attestation data - 'data': AttestationData, - # Custody bit - 'custody_bit': 'bool', -}) -SlashableAttestation = SSZType({ - # Validator indices - 'validator_indices': ['uint64'], - # Attestation data - 'data': AttestationData, - # Custody bitfield - 
'custody_bitfield': 'bytes', - # Aggregate signature - 'aggregate_signature': 'bytes96', -}) -DepositData = SSZType({ - # BLS pubkey - 'pubkey': 'bytes48', - # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', - # Amount in Gwei - 'amount': 'uint64', - # Container self-signature - 'proof_of_possession': 'bytes96', -}) -BeaconBlockHeader = SSZType({ - 'slot': 'uint64', - 'previous_block_root': 'bytes32', - 'state_root': 'bytes32', - 'block_body_root': 'bytes32', - 'signature': 'bytes96', -}) -Validator = SSZType({ - # BLS public key - 'pubkey': 'bytes48', - # Withdrawal credentials - 'withdrawal_credentials': 'bytes32', - # Epoch when validator activated - 'activation_epoch': 'uint64', - # Epoch when validator exited - 'exit_epoch': 'uint64', - # Epoch when validator is eligible to withdraw - 'withdrawable_epoch': 'uint64', - # Did the validator initiate an exit - 'initiated_exit': 'bool', - # Was the validator slashed - 'slashed': 'bool', - # Rounded balance - 'high_balance': 'uint64' -}) -PendingAttestation = SSZType({ - # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', - # Attestation data - 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', - # Inclusion slot - 'inclusion_slot': 'uint64', -}) -HistoricalBatch = SSZType({ - # Block roots - 'block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - # State roots - 'state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], -}) -ProposerSlashing = SSZType({ - # Proposer index - 'proposer_index': 'uint64', - # First block header - 'header_1': BeaconBlockHeader, - # Second block header - 'header_2': BeaconBlockHeader, -}) -AttesterSlashing = SSZType({ - # First slashable attestation - 'slashable_attestation_1': SlashableAttestation, - # Second slashable attestation - 'slashable_attestation_2': SlashableAttestation, -}) -Attestation = SSZType({ - # Attester aggregation bitfield - 'aggregation_bitfield': 'bytes', - # Attestation data - 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', - # BLS aggregate signature - 'aggregate_signature': 'bytes96', -}) -Deposit = SSZType({ - # Branch in the deposit tree - 'proof': ['bytes32', DEPOSIT_CONTRACT_TREE_DEPTH], - # Index in the deposit tree - 'index': 'uint64', - # Data - 'data': DepositData, -}) -VoluntaryExit = SSZType({ - # Minimum epoch for processing exit - 'epoch': 'uint64', - # Index of the exiting validator - 'validator_index': 'uint64', - # Validator signature - 'signature': 'bytes96', -}) -Transfer = SSZType({ - # Sender index - 'sender': 'uint64', - # Recipient index - 'recipient': 'uint64', - # Amount in Gwei - 'amount': 'uint64', - # Fee in Gwei for block proposer - 'fee': 'uint64', - # Inclusion slot - 'slot': 'uint64', - # Sender withdrawal pubkey - 'pubkey': 'bytes48', - # Sender signature - 'signature': 'bytes96', -}) -BeaconBlockBody = SSZType({ - 'randao_reveal': 'bytes96', - 'eth1_data': Eth1Data, - 'proposer_slashings': [ProposerSlashing], - 'attester_slashings': [AttesterSlashing], - 'attestations': [Attestation], - 'deposits': [Deposit], - 'voluntary_exits': [VoluntaryExit], - 'transfers': [Transfer], -}) -BeaconBlock = SSZType({ - # Header - 'slot': 'uint64', - 'previous_block_root': 'bytes32', - 'state_root': 'bytes32', - 'body': BeaconBlockBody, - 'signature': 'bytes96', -}) -BeaconState = SSZType({ - # Misc - 'slot': 'uint64', - 'genesis_time': 'uint64', - 'fork': Fork, # For versioning hard forks - - # Validator registry - 'validator_registry': [Validator], - 'balances': ['uint64'], - 
'validator_registry_update_epoch': 'uint64', - - # Randomness and committees - 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], - 'latest_start_shard': 'uint64', - - # Finality - 'previous_epoch_attestations': [PendingAttestation], - 'current_epoch_attestations': [PendingAttestation], - 'previous_justified_epoch': 'uint64', - 'current_justified_epoch': 'uint64', - 'previous_justified_root': 'bytes32', - 'current_justified_root': 'bytes32', - 'justification_bitfield': 'uint64', - 'finalized_epoch': 'uint64', - 'finalized_root': 'bytes32', - - # Recent state - 'latest_crosslinks': [Crosslink, SHARD_COUNT], - 'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], - 'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH], - 'latest_slashed_balances': ['uint64', LATEST_SLASHED_EXIT_LENGTH], # Balances slashed at every withdrawal period - 'latest_block_header': BeaconBlockHeader, # `latest_block_header.state_root == ZERO_HASH` temporarily - 'historical_roots': ['bytes32'], - - # Ethereum 1.0 chain data - 'latest_eth1_data': Eth1Data, - 'eth1_data_votes': [Eth1DataVote], - 'deposit_index': 'uint64', -}) - - -def xor(bytes1: Bytes32, bytes2: Bytes32) -> Bytes32: - return bytes(a ^ b for a, b in zip(bytes1, bytes2)) - - -def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: - """ - Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. - """ - return BeaconBlockHeader( - slot=block.slot, - previous_block_root=block.previous_block_root, - state_root=ZERO_HASH, - block_body_root=hash_tree_root(block.body), - # signed_root(block) is used for block id purposes so signature is a stub - signature=EMPTY_SIGNATURE, - ) - - -def slot_to_epoch(slot: Slot) -> Epoch: - """ - Return the epoch number of the given ``slot``. - """ - return slot // SLOTS_PER_EPOCH - - -def get_previous_epoch(state: BeaconState) -> Epoch: - """` - Return the previous epoch of the given ``state``. - """ - return get_current_epoch(state) - 1 - - -def get_current_epoch(state: BeaconState) -> Epoch: - """ - Return the current epoch of the given ``state``. - """ - return slot_to_epoch(state.slot) - - -def get_epoch_start_slot(epoch: Epoch) -> Slot: - """ - Return the starting slot of the given ``epoch``. - """ - return epoch * SLOTS_PER_EPOCH - - -def is_active_validator(validator: Validator, epoch: Epoch) -> bool: - """ - Check if ``validator`` is active. - """ - return validator.activation_epoch <= epoch < validator.exit_epoch - - -def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: - """ - Check if ``validator`` is slashable. - """ - return ( - validator.activation_epoch <= epoch < validator.withdrawable_epoch and - validator.slashed is False - ) - - -def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]: - """ - Get indices of active validators from ``validators``. - """ - return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] - - -def get_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: - """ - Return the balance for a validator with the given ``index``. - """ - return state.balances[index] - - -def set_balance(state: BeaconState, index: ValidatorIndex, balance: Gwei) -> None: - """ - Set the balance for a validator with the given ``index`` in both ``BeaconState`` - and validator's rounded balance ``high_balance``. 
- """ - validator = state.validator_registry[index] - HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 - if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: - validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT - state.balances[index] = balance - - -def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: - """ - Increase the balance for a validator with the given ``index`` by ``delta``. - """ - set_balance(state, index, get_balance(state, index) + delta) - - -def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: - """ - Decrease the balance for a validator with the given ``index`` by ``delta``. - Set to ``0`` when underflow. - """ - current_balance = get_balance(state, index) - set_balance(state, index, current_balance - delta if current_balance >= delta else 0) - - -def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: - """ - Return `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy. - - Utilizes 'swap or not' shuffling found in - https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf - See the 'generalized domain' algorithm on page 3. - """ - assert index < list_size - assert list_size <= 2**40 - - for round in range(SHUFFLE_ROUND_COUNT): - pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size - flip = (pivot - index) % list_size - position = max(index, flip) - source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256)) - byte = source[(position % 256) // 8] - bit = (byte >> (position % 8)) % 2 - index = flip if bit else index - - return index - - -def get_split_offset(list_size: int, chunks: int, index: int) -> int: - """ - Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] - """ - return (list_size * index) // chunks - - -def get_epoch_committee_count(active_validator_count: int) -> int: - """ - Return the number of committees in one epoch. - """ - return max( - 1, - min( - SHARD_COUNT // SLOTS_PER_EPOCH, - active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, - ) - ) * SLOTS_PER_EPOCH - - -def compute_committee(validator_indices: List[ValidatorIndex], - seed: Bytes32, - index: int, - total_committees: int) -> List[ValidatorIndex]: - """ - Return the ``index``'th shuffled committee out of a total ``total_committees`` - using ``validator_indices`` and ``seed``. - """ - start_offset = get_split_offset(len(validator_indices), total_committees, index) - end_offset = get_split_offset(len(validator_indices), total_committees, index + 1) - return [ - validator_indices[get_permuted_index(i, len(validator_indices), seed)] - for i in range(start_offset, end_offset) - ] - - -def get_current_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the current epoch of the given ``state``. - """ - current_active_validators = get_active_validator_indices( - state.validator_registry, - get_current_epoch(state), - ) - return get_epoch_committee_count(len(current_active_validators)) - - -def get_crosslink_committees_at_slot(state: BeaconState, - slot: Slot) -> List[Tuple[List[ValidatorIndex], Shard]]: - """ - Return the list of ``(committee, shard)`` tuples for the ``slot``. 
- """ - epoch = slot_to_epoch(slot) - current_epoch = get_current_epoch(state) - previous_epoch = get_previous_epoch(state) - next_epoch = current_epoch + 1 - - assert previous_epoch <= epoch <= next_epoch - indices = get_active_validator_indices( - state.validator_registry, - epoch, - ) - committees_per_epoch = get_epoch_committee_count(len(indices)) - - if epoch == current_epoch: - start_shard = state.latest_start_shard - elif epoch == previous_epoch: - start_shard = (state.latest_start_shard - committees_per_epoch) % SHARD_COUNT - elif epoch == next_epoch: - current_epoch_committees = get_current_epoch_committee_count(state) - start_shard = (state.latest_start_shard + current_epoch_committees) % SHARD_COUNT - - committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH - offset = slot % SLOTS_PER_EPOCH - slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT - seed = generate_seed(state, epoch) - - return [ - ( - compute_committee(indices, seed, committees_per_slot * offset + i, committees_per_epoch), - (slot_start_shard + i) % SHARD_COUNT, - ) - for i in range(committees_per_slot) - ] - - -def get_block_root(state: BeaconState, - slot: Slot) -> Bytes32: - """ - Return the block root at a recent ``slot``. - """ - assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT - return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] - - -def get_state_root(state: BeaconState, - slot: Slot) -> Bytes32: - """ - Return the state root at a recent ``slot``. - """ - assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT - return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] - - -def get_randao_mix(state: BeaconState, - epoch: Epoch) -> Bytes32: - """ - Return the randao mix at a recent ``epoch``. - """ - assert get_current_epoch(state) - LATEST_RANDAO_MIXES_LENGTH < epoch <= get_current_epoch(state) - return state.latest_randao_mixes[epoch % LATEST_RANDAO_MIXES_LENGTH] - - -def get_active_index_root(state: BeaconState, - epoch: Epoch) -> Bytes32: - """ - Return the index root at a recent ``epoch``. - """ - assert get_current_epoch(state) - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY < epoch <= get_current_epoch(state) + ACTIVATION_EXIT_DELAY - return state.latest_active_index_roots[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] - - -def generate_seed(state: BeaconState, - epoch: Epoch) -> Bytes32: - """ - Generate a seed for the given ``epoch``. - """ - return hash( - get_randao_mix(state, epoch - MIN_SEED_LOOKAHEAD) + - get_active_index_root(state, epoch) + - int_to_bytes32(epoch) - ) - - -def get_beacon_proposer_index(state: BeaconState, - slot: Slot) -> ValidatorIndex: - """ - Return the beacon proposer index for the ``slot``. - Due to proposer selection being based upon the validator balances during - the epoch in question, this can only be run for the current epoch. 
- """ - current_epoch = get_current_epoch(state) - assert slot_to_epoch(slot) == current_epoch - - first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0] - i = 0 - while True: - rand_byte = hash( - generate_seed(state, current_epoch) + - int_to_bytes8(i // 32) - )[i % 32] - candidate = first_committee[(current_epoch + i) % len(first_committee)] - if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: - return candidate - i += 1 - - -def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: int, root: Bytes32) -> bool: - """ - Verify that the given ``leaf`` is on the merkle branch ``proof`` - starting with the given ``root``. - """ - value = leaf - for i in range(depth): - if index // (2**i) % 2: - value = hash(proof[i] + value) - else: - value = hash(value + proof[i]) - return value == root - - -def get_attestation_participants(state: BeaconState, - attestation_data: AttestationData, - bitfield: bytes) -> List[ValidatorIndex]: - """ - Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. - """ - # Find the committee in the list with the desired shard - crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - - assert attestation_data.shard in [shard for _, shard in crosslink_committees] - crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] - - assert verify_bitfield(bitfield, len(crosslink_committee)) - - # Find the participating attesters in the committee - participants = [] - for i, validator_index in enumerate(crosslink_committee): - aggregation_bit = get_bitfield_bit(bitfield, i) - if aggregation_bit == 0b1: - participants.append(validator_index) - return participants - - -def bytes_to_int(data: bytes) -> int: - return int.from_bytes(data, 'little') - - -def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: - """ - Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. - """ - return min(get_balance(state, index), MAX_DEPOSIT_AMOUNT) - - -def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> Gwei: - """ - Return the combined effective balance of an array of ``validators``. - """ - return sum([get_effective_balance(state, i) for i in validators]) - - -def get_fork_version(fork: Fork, - epoch: Epoch) -> bytes: - """ - Return the fork version of the given ``epoch``. - """ - if epoch < fork.epoch: - return fork.previous_version - else: - return fork.current_version - - -def get_domain(fork: Fork, - epoch: Epoch, - domain_type: int) -> int: - """ - Get the domain number that represents the fork meta and signature domain. - """ - return bytes_to_int(get_fork_version(fork, epoch) + int_to_bytes4(domain_type)) - - -def get_bitfield_bit(bitfield: bytes, i: int) -> int: - """ - Extract the bit in ``bitfield`` at position ``i``. - """ - return (bitfield[i // 8] >> (i % 8)) % 2 - - -def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: - """ - Verify ``bitfield`` against the ``committee_size``. 
- """ - if len(bitfield) != (committee_size + 7) // 8: - return False - - # Check `bitfield` is padded with zero bits only - for i in range(committee_size, len(bitfield) * 8): - if get_bitfield_bit(bitfield, i) == 0b1: - return False - - return True - - -def verify_slashable_attestation(state: BeaconState, slashable_attestation: SlashableAttestation) -> bool: - """ - Verify validity of ``slashable_attestation`` fields. - """ - if slashable_attestation.custody_bitfield != b'\x00' * len(slashable_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] - return False - - if not (1 <= len(slashable_attestation.validator_indices) <= MAX_SLASHABLE_ATTESTATION_PARTICIPANTS): - return False - - for i in range(len(slashable_attestation.validator_indices) - 1): - if slashable_attestation.validator_indices[i] >= slashable_attestation.validator_indices[i + 1]: - return False - - if not verify_bitfield(slashable_attestation.custody_bitfield, len(slashable_attestation.validator_indices)): - return False - - custody_bit_0_indices = [] - custody_bit_1_indices = [] - for i, validator_index in enumerate(slashable_attestation.validator_indices): - if get_bitfield_bit(slashable_attestation.custody_bitfield, i) == 0b0: - custody_bit_0_indices.append(validator_index) - else: - custody_bit_1_indices.append(validator_index) - - return bls_verify_multiple( - pubkeys=[ - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_0_indices]), - bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in custody_bit_1_indices]), - ], - message_hashes=[ - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b0)), - hash_tree_root(AttestationDataAndCustodyBit(data=slashable_attestation.data, custody_bit=0b1)), - ], - signature=slashable_attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(slashable_attestation.data.slot), DOMAIN_ATTESTATION), - ) - - -def is_double_vote(attestation_data_1: AttestationData, - attestation_data_2: AttestationData) -> bool: - """ - Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. - """ - target_epoch_1 = slot_to_epoch(attestation_data_1.slot) - target_epoch_2 = slot_to_epoch(attestation_data_2.slot) - return target_epoch_1 == target_epoch_2 - - -def is_surround_vote(attestation_data_1: AttestationData, - attestation_data_2: AttestationData) -> bool: - """ - Check if ``attestation_data_1`` surrounds ``attestation_data_2``. - """ - source_epoch_1 = attestation_data_1.source_epoch - source_epoch_2 = attestation_data_2.source_epoch - target_epoch_1 = slot_to_epoch(attestation_data_1.slot) - target_epoch_2 = slot_to_epoch(attestation_data_2.slot) - - return source_epoch_1 < source_epoch_2 and target_epoch_2 < target_epoch_1 - - -def integer_squareroot(n: int) -> int: - """ - The largest integer ``x`` such that ``x**2`` is less than or equal to ``n``. - """ - assert n >= 0 - x = n - y = (x + 1) // 2 - while y < x: - x = y - y = (x + n // x) // 2 - return x - - -def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: - """ - Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. - """ - return epoch + 1 + ACTIVATION_EXIT_DELAY - - -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - """ - Process a deposit from Ethereum 1.0. - Note that this function mutates ``state``. 
- """ - # Deposits must be processed in order - assert deposit.index == state.deposit_index - - # Verify the Merkle branch - merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization - proof=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH, - index=deposit.index, - root=state.latest_eth1_data.deposit_root, - ) - assert merkle_branch_is_valid - - # Increment the next deposit index we are expecting. Note that this - # needs to be done here because while the deposit contract will never - # create an invalid Merkle branch, it may admit an invalid deposit - # object, and we need to be able to skip over it - state.deposit_index += 1 - - validator_pubkeys = [v.pubkey for v in state.validator_registry] - pubkey = deposit.data.pubkey - amount = deposit.data.amount - - if pubkey not in validator_pubkeys: - # Verify the proof of possession - proof_is_valid = bls_verify( - pubkey=pubkey, - message_hash=signed_root(deposit.data), - signature=deposit.data.proof_of_possession, - domain=get_domain( - state.fork, - get_current_epoch(state), - DOMAIN_DEPOSIT, - ) - ) - if not proof_is_valid: - return - - # Add new validator - validator = Validator( - pubkey=pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - initiated_exit=False, - slashed=False, - high_balance=0 - ) - - # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. - state.validator_registry.append(validator) - state.balances.append(0) - set_balance(state, len(state.validator_registry) - 1, amount) - else: - # Increase balance by deposit amount - index = validator_pubkeys.index(pubkey) - increase_balance(state, index, amount) - - -def activate_validator(state: BeaconState, index: ValidatorIndex, is_genesis: bool) -> None: - """ - Activate the validator of the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - - validator.activation_epoch = GENESIS_EPOCH if is_genesis else get_delayed_activation_exit_epoch(get_current_epoch(state)) - - -def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: - """ - Initiate the validator of the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - validator.initiated_exit = True - - -def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: - """ - Exit the validator with the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - - # Update validator exit epoch if not previously exited - if validator.exit_epoch == FAR_FUTURE_EPOCH: - validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - - -def slash_validator(state: BeaconState, index: ValidatorIndex) -> None: - """ - Slash the validator with index ``index``. - Note that this function mutates ``state``. 
- """ - validator = state.validator_registry[index] - exit_validator(state, index) - state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += get_effective_balance(state, index) - - whistleblower_index = get_beacon_proposer_index(state, state.slot) - whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT - increase_balance(state, whistleblower_index, whistleblower_reward) - decrease_balance(state, index, whistleblower_reward) - validator.slashed = True - validator.withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH - - -def prepare_validator_for_withdrawal(state: BeaconState, index: ValidatorIndex) -> None: - """ - Set the validator with the given ``index`` as withdrawable - ``MIN_VALIDATOR_WITHDRAWABILITY_DELAY`` after the current epoch. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - validator.withdrawable_epoch = get_current_epoch(state) + MIN_VALIDATOR_WITHDRAWABILITY_DELAY - - -def get_empty_block() -> BeaconBlock: - """ - Get an empty ``BeaconBlock``. - """ - return BeaconBlock( - slot=GENESIS_SLOT, - previous_block_root=ZERO_HASH, - state_root=ZERO_HASH, - body=BeaconBlockBody( - randao_reveal=EMPTY_SIGNATURE, - eth1_data=Eth1Data( - deposit_root=ZERO_HASH, - deposit_count=0, - block_hash=ZERO_HASH, - ), - proposer_slashings=[], - attester_slashings=[], - attestations=[], - deposits=[], - voluntary_exits=[], - transfers=[], - ), - signature=EMPTY_SIGNATURE, - ) - - -def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], - genesis_time: int, - genesis_eth1_data: Eth1Data) -> BeaconState: - """ - Get the genesis ``BeaconState``. - """ - state = BeaconState( - # Misc - slot=GENESIS_SLOT, - genesis_time=genesis_time, - fork=Fork( - previous_version=GENESIS_FORK_VERSION, - current_version=GENESIS_FORK_VERSION, - epoch=GENESIS_EPOCH, - ), - - # Validator registry - validator_registry=[], - balances=[], - validator_registry_update_epoch=GENESIS_EPOCH, - - # Randomness and committees - latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), - latest_start_shard=GENESIS_START_SHARD, - - # Finality - previous_epoch_attestations=[], - current_epoch_attestations=[], - previous_justified_epoch=GENESIS_EPOCH - 1, - current_justified_epoch=GENESIS_EPOCH, - previous_justified_root=ZERO_HASH, - current_justified_root=ZERO_HASH, - justification_bitfield=0, - finalized_epoch=GENESIS_EPOCH, - finalized_root=ZERO_HASH, - - # Recent state - latest_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), - latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), - latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), - latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), - latest_slashed_balances=Vector([0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)]), - latest_block_header=get_temporary_block_header(get_empty_block()), - historical_roots=[], - - # Ethereum 1.0 chain data - latest_eth1_data=genesis_eth1_data, - eth1_data_votes=[], - deposit_index=0, - ) - - # Process genesis deposits - for deposit in genesis_validator_deposits: - process_deposit(state, deposit) - - # Process genesis activations - for validator_index, _ in enumerate(state.validator_registry): - if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: - activate_validator(state, validator_index, 
is_genesis=True) - - genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) - for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): - state.latest_active_index_roots[index] = genesis_active_index_root - - return state - - -def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: - """ - Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found. - """ - if block.slot == slot: - return block - elif block.slot < slot: - return None - else: - return get_ancestor(store, store.get_parent(block), slot) - - -def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock: - """ - Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``. - """ - validators = start_state.validator_registry - active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) - attestation_targets = [ - (validator_index, get_latest_attestation_target(store, validator_index)) - for validator_index in active_validator_indices - ] - - # Use the rounded-balance-with-hysteresis supplied by the protocol for fork - # choice voting. This reduces the number of recomputations that need to be - # made for optimized implementations that precompute and save data - def get_vote_count(block: BeaconBlock) -> int: - return sum( - start_state.validator_registry[validator_index].high_balance - for validator_index, target in attestation_targets - if get_ancestor(store, target, block.slot) == block - ) - - head = start_block - while 1: - children = get_children(store, head) - if len(children) == 0: - return head - head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) - - -def cache_state(state: BeaconState) -> None: - previous_slot_state_root = hash_tree_root(state) - - # store the previous slot's post state transition root - state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_slot_state_root - - # cache state root in stored latest_block_header if empty - if state.latest_block_header.state_root == ZERO_HASH: - state.latest_block_header.state_root = previous_slot_state_root - - # store latest known block for previous slot - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signed_root(state.latest_block_header) - - -def get_current_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_current_epoch(state))) - - -def get_previous_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_previous_epoch(state))) - - -def get_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: - output = set() - for a in attestations: - output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) - return sorted(list(output)) - - -def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: - return get_total_balance(state, get_attesting_indices(state, attestations)) - - -def get_current_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: - return [ - a for a in state.current_epoch_attestations - if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_current_epoch(state))) - ] - - -def get_previous_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: - return [ - a for a in 
state.previous_epoch_attestations - if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) - ] - - -def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[PendingAttestation]: - return [ - a for a in state.previous_epoch_attestations - if a.data.beacon_block_root == get_block_root(state, a.data.slot) - ] - - -def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: - all_attestations = state.current_epoch_attestations + state.previous_epoch_attestations - valid_attestations = [ - a for a in all_attestations if a.data.previous_crosslink == state.latest_crosslinks[shard] - ] - all_roots = [a.data.crosslink_data_root for a in valid_attestations] - - # handle when no attestations for shard available - if len(all_roots) == 0: - return ZERO_HASH, [] - - def get_attestations_for(root) -> List[PendingAttestation]: - return [a for a in valid_attestations if a.data.crosslink_data_root == root] - - # Winning crosslink root is the root with the most votes for it, ties broken in favor of - # lexicographically higher hash - winning_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r)), r)) - - return winning_root, get_attesting_indices(state, get_attestations_for(winning_root)) - - -def earliest_attestation(state: BeaconState, validator_index: ValidatorIndex) -> PendingAttestation: - return min([ - a for a in state.previous_epoch_attestations if - validator_index in get_attestation_participants(state, a.data, a.aggregation_bitfield) - ], key=lambda a: a.inclusion_slot) - - -def inclusion_slot(state: BeaconState, validator_index: ValidatorIndex) -> Slot: - return earliest_attestation(state, validator_index).inclusion_slot - - -def inclusion_distance(state: BeaconState, validator_index: ValidatorIndex) -> int: - attestation = earliest_attestation(state, validator_index) - return attestation.inclusion_slot - attestation.data.slot - - -def update_justification_and_finalization(state: BeaconState) -> None: - new_justified_epoch = state.current_justified_epoch - new_finalized_epoch = state.finalized_epoch - - # Rotate the justification bitfield up one epoch to make room for the current epoch - state.justification_bitfield <<= 1 - # If the previous epoch gets justified, fill the second last bit - previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) - if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - 1 - state.justification_bitfield |= 2 - # If the current epoch gets justified, fill the last bit - current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) - if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - state.justification_bitfield |= 1 - - # Process finalizations - bitfield = state.justification_bitfield - current_epoch = get_current_epoch(state) - # The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source - if (bitfield >> 1) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 3: - new_finalized_epoch = state.previous_justified_epoch - # The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source - if (bitfield >> 1) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 2: - 
new_finalized_epoch = state.previous_justified_epoch - # The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source - if (bitfield >> 0) % 8 == 0b111 and state.current_justified_epoch == current_epoch - 2: - new_finalized_epoch = state.current_justified_epoch - # The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source - if (bitfield >> 0) % 4 == 0b11 and state.current_justified_epoch == current_epoch - 1: - new_finalized_epoch = state.current_justified_epoch - - # Update state jusification/finality fields - state.previous_justified_epoch = state.current_justified_epoch - state.previous_justified_root = state.current_justified_root - if new_justified_epoch != state.current_justified_epoch: - state.current_justified_epoch = new_justified_epoch - state.current_justified_root = get_block_root(state, get_epoch_start_slot(new_justified_epoch)) - if new_finalized_epoch != state.finalized_epoch: - state.finalized_epoch = new_finalized_epoch - state.finalized_root = get_block_root(state, get_epoch_start_slot(new_finalized_epoch)) - - -def process_crosslinks(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) - next_epoch = current_epoch + 1 - for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): - for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) - participating_balance = get_total_balance(state, participants) - total_balance = get_total_balance(state, crosslink_committee) - if 3 * participating_balance >= 2 * total_balance: - state.latest_crosslinks[shard] = Crosslink( - epoch=min(slot_to_epoch(slot), state.latest_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), - crosslink_data_root=winning_root - ) - - -def maybe_reset_eth1_period(state: BeaconState) -> None: - if (get_current_epoch(state) + 1) % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: - for eth1_data_vote in state.eth1_data_votes: - # If a majority of all votes were for a particular eth1_data value, - # then set that as the new canonical value - if eth1_data_vote.vote_count * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: - state.latest_eth1_data = eth1_data_vote.eth1_data - state.eth1_data_votes = [] - - -def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: - if get_previous_total_balance(state) == 0: - return 0 - - adjusted_quotient = integer_squareroot(get_previous_total_balance(state)) // BASE_REWARD_QUOTIENT - return get_effective_balance(state, index) // adjusted_quotient // 5 - - -def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: - if epochs_since_finality <= 4: - extra_penalty = 0 - else: - extra_penalty = get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 - return get_base_reward(state, index) + extra_penalty - - -def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - current_epoch = get_current_epoch(state) - epochs_since_finality = current_epoch + 1 - state.finalized_epoch - rewards = [0 for index in range(len(state.validator_registry))] - penalties = [0 for index in range(len(state.validator_registry))] - # Some helper variables - boundary_attestations = get_previous_epoch_boundary_attestations(state) - boundary_attesting_balance = get_attesting_balance(state, boundary_attestations) - 
total_balance = get_previous_total_balance(state) - total_attesting_balance = get_attesting_balance(state, state.previous_epoch_attestations) - matching_head_attestations = get_previous_epoch_matching_head_attestations(state) - matching_head_balance = get_attesting_balance(state, matching_head_attestations) - eligible_validators = [ - index for index, validator in enumerate(state.validator_registry) - if ( - is_active_validator(validator, current_epoch) or - (validator.slashed and current_epoch < validator.withdrawable_epoch) - ) - ] - # Process rewards or penalties for all validators - for index in eligible_validators: - base_reward = get_base_reward(state, index) - # Expected FFG source - if index in get_attesting_indices(state, state.previous_epoch_attestations): - rewards[index] += base_reward * total_attesting_balance // total_balance - # Inclusion speed bonus - rewards[index] += ( - base_reward * MIN_ATTESTATION_INCLUSION_DELAY // - inclusion_distance(state, index) - ) - else: - penalties[index] += base_reward - # Expected FFG target - if index in get_attesting_indices(state, boundary_attestations): - rewards[index] += base_reward * boundary_attesting_balance // total_balance - else: - penalties[index] += get_inactivity_penalty(state, index, epochs_since_finality) - # Expected head - if index in get_attesting_indices(state, matching_head_attestations): - rewards[index] += base_reward * matching_head_balance // total_balance - else: - penalties[index] += base_reward - # Proposer bonus - if index in get_attesting_indices(state, state.previous_epoch_attestations): - proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - rewards[proposer_index] += base_reward // ATTESTATION_INCLUSION_REWARD_QUOTIENT - # Take away max rewards if we're not finalizing - if epochs_since_finality > 4: - penalties[index] += base_reward * 4 - return [rewards, penalties] - - -def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - rewards = [0 for index in range(len(state.validator_registry))] - penalties = [0 for index in range(len(state.validator_registry))] - previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) - current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - for slot in range(previous_epoch_start_slot, current_epoch_start_slot): - for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) - participating_balance = get_total_balance(state, participants) - total_balance = get_total_balance(state, crosslink_committee) - for index in crosslink_committee: - if index in participants: - rewards[index] += get_base_reward(state, index) * participating_balance // total_balance - else: - penalties[index] += get_base_reward(state, index) - return [rewards, penalties] - - -def apply_rewards(state: BeaconState) -> None: - rewards1, penalties1 = get_justification_and_finalization_deltas(state) - rewards2, penalties2 = get_crosslink_deltas(state) - for i in range(len(state.validator_registry)): - set_balance( - state, - i, - max( - 0, - get_balance(state, i) + rewards1[i] + rewards2[i] - penalties1[i] - penalties2[i], - ), - ) - - -def process_ejections(state: BeaconState) -> None: - """ - Iterate through the validator registry - and eject active validators with balance below ``EJECTION_BALANCE``. 
- """ - for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): - if get_balance(state, index) < EJECTION_BALANCE: - initiate_validator_exit(state, index) - - -def update_validator_registry(state: BeaconState) -> None: - """ - Update validator registry. - Note that this function mutates ``state``. - """ - current_epoch = get_current_epoch(state) - # The active validators - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) - # The total effective balance of active validators - total_balance = get_total_balance(state, active_validator_indices) - - # The maximum balance churn in Gwei (for deposits and exits separately) - max_balance_churn = max( - MAX_DEPOSIT_AMOUNT, - total_balance // (2 * MAX_BALANCE_CHURN_QUOTIENT) - ) - - # Activate validators within the allowable balance churn - balance_churn = 0 - for index, validator in enumerate(state.validator_registry): - if validator.activation_epoch == FAR_FUTURE_EPOCH and get_balance(state, index) >= MAX_DEPOSIT_AMOUNT: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Activate validator - activate_validator(state, index, is_genesis=False) - - # Exit validators within the allowable balance churn - if current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: - balance_churn = ( - state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - - state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - ) - - for index, validator in enumerate(state.validator_registry): - if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Exit validator - exit_validator(state, index) - - state.validator_registry_update_epoch = current_epoch - - -def update_registry(state: BeaconState) -> None: - # Check if we should update, and if so, update - if state.finalized_epoch > state.validator_registry_update_epoch: - update_validator_registry(state) - state.latest_start_shard = ( - state.latest_start_shard + - get_current_epoch_committee_count(state) - ) % SHARD_COUNT - - -def process_slashings(state: BeaconState) -> None: - """ - Process the slashings. - Note that this function mutates ``state``. - """ - current_epoch = get_current_epoch(state) - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) - total_balance = get_total_balance(state, active_validator_indices) - - # Compute `total_penalties` - total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] - total_at_end = state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - total_penalties = total_at_end - total_at_start - - for index, validator in enumerate(state.validator_registry): - if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2: - penalty = max( - get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance, - get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT - ) - decrease_balance(state, index, penalty) - - -def process_exit_queue(state: BeaconState) -> None: - """ - Process the exit queue. - Note that this function mutates ``state``. 
- """ - def eligible(index): - validator = state.validator_registry[index] - # Filter out dequeued validators - if validator.withdrawable_epoch != FAR_FUTURE_EPOCH: - return False - # Dequeue if the minimum amount of time has passed - else: - return get_current_epoch(state) >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY - - eligible_indices = filter(eligible, list(range(len(state.validator_registry)))) - # Sort in order of exit epoch, and validators that exit within the same epoch exit in order of validator index - sorted_indices = sorted(eligible_indices, key=lambda index: state.validator_registry[index].exit_epoch) - for dequeues, index in enumerate(sorted_indices): - if dequeues >= MAX_EXIT_DEQUEUES_PER_EPOCH: - break - prepare_validator_for_withdrawal(state, index) - - -def finish_epoch_update(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - next_epoch = current_epoch + 1 - # Set active index root - index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH - state.latest_active_index_roots[index_root_position] = hash_tree_root( - get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY) - ) - # Set total slashed balances - state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = ( - state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - ) - # Set randao mix - state.latest_randao_mixes[next_epoch % LATEST_RANDAO_MIXES_LENGTH] = get_randao_mix(state, current_epoch) - # Set historical root accumulator - if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: - historical_batch = HistoricalBatch( - block_roots=state.latest_block_roots, - state_roots=state.latest_state_roots, - ) - state.historical_roots.append(hash_tree_root(historical_batch)) - # Rotate current/previous epoch attestations - state.previous_epoch_attestations = state.current_epoch_attestations - state.current_epoch_attestations = [] - - -def advance_slot(state: BeaconState) -> None: - state.slot += 1 - - -def process_block_header(state: BeaconState, block: BeaconBlock) -> None: - # Verify that the slots match - assert block.slot == state.slot - # Verify that the parent matches - assert block.previous_block_root == signed_root(state.latest_block_header) - # Save current block as the new latest block - state.latest_block_header = get_temporary_block_header(block) - # Verify proposer is not slashed - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] - assert not proposer.slashed - # Verify proposer signature - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=signed_root(block), - signature=block.signature, - domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_BEACON_BLOCK) - ) - - -def process_randao(state: BeaconState, block: BeaconBlock) -> None: - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] - # Verify that the provided randao value is valid - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=hash_tree_root(get_current_epoch(state)), - signature=block.body.randao_reveal, - domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO) - ) - # Mix it in - state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( - xor(get_randao_mix(state, get_current_epoch(state)), - hash(block.body.randao_reveal)) - ) - - -def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: - for eth1_data_vote in state.eth1_data_votes: - # If 
someone else has already voted for the same hash, add to its counter - if eth1_data_vote.eth1_data == block.body.eth1_data: - eth1_data_vote.vote_count += 1 - return - # If we're seeing this hash for the first time, make a new counter - state.eth1_data_votes.append(Eth1DataVote(eth1_data=block.body.eth1_data, vote_count=1)) - - -def process_proposer_slashing(state: BeaconState, - proposer_slashing: ProposerSlashing) -> None: - """ - Process ``ProposerSlashing`` transaction. - Note that this function mutates ``state``. - """ - proposer = state.validator_registry[proposer_slashing.proposer_index] - # Verify that the epoch is the same - assert slot_to_epoch(proposer_slashing.header_1.slot) == slot_to_epoch(proposer_slashing.header_2.slot) - # But the headers are different - assert proposer_slashing.header_1 != proposer_slashing.header_2 - # Check proposer is slashable - assert is_slashable_validator(proposer, get_current_epoch(state)) - # Signatures are valid - for header in (proposer_slashing.header_1, proposer_slashing.header_2): - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=signed_root(header), - signature=header.signature, - domain=get_domain(state.fork, slot_to_epoch(header.slot), DOMAIN_BEACON_BLOCK) - ) - slash_validator(state, proposer_slashing.proposer_index) - - -def process_attester_slashing(state: BeaconState, - attester_slashing: AttesterSlashing) -> None: - """ - Process ``AttesterSlashing`` transaction. - Note that this function mutates ``state``. - """ - attestation1 = attester_slashing.slashable_attestation_1 - attestation2 = attester_slashing.slashable_attestation_2 - # Check that the attestations are conflicting - assert attestation1.data != attestation2.data - assert ( - is_double_vote(attestation1.data, attestation2.data) or - is_surround_vote(attestation1.data, attestation2.data) - ) - assert verify_slashable_attestation(state, attestation1) - assert verify_slashable_attestation(state, attestation2) - slashable_indices = [ - index for index in attestation1.validator_indices - if ( - index in attestation2.validator_indices and - is_slashable_validator(state.validator_registry[index], get_current_epoch(state)) - ) - ] - assert len(slashable_indices) >= 1 - for index in slashable_indices: - slash_validator(state, index) - - -def process_attestation(state: BeaconState, attestation: Attestation) -> None: - """ - Process ``Attestation`` transaction. - Note that this function mutates ``state``. 
- """ - assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY - - # Check target epoch, source epoch, and source root - target_epoch = slot_to_epoch(attestation.data.slot) - assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { - (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), - (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), - } - - # Check crosslink data - assert attestation.data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] - assert state.latest_crosslinks[attestation.data.shard] in { - attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink - Crosslink( # Case 2: latest crosslink matches current crosslink - crosslink_data_root=attestation.data.crosslink_data_root, - epoch=min(slot_to_epoch(attestation.data.slot), - attestation.data.previous_crosslink.epoch + MAX_CROSSLINK_EPOCHS) - ), - } - - # Check custody bits [to be generalised in phase 1] - assert attestation.custody_bitfield == b'\x00' * len(attestation.custody_bitfield) - - # Check aggregate signature [to be generalised in phase 1] - participants = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - assert len(participants) != 0 - assert bls_verify( - pubkey=bls_aggregate_pubkeys([state.validator_registry[i].pubkey for i in participants]), - message_hash=hash_tree_root(AttestationDataAndCustodyBit(data=attestation.data, custody_bit=0b0)), - signature=attestation.aggregate_signature, - domain=get_domain(state.fork, target_epoch, DOMAIN_ATTESTATION), - ) - - # Cache pending attestation - pending_attestation = PendingAttestation( - data=attestation.data, - aggregation_bitfield=attestation.aggregation_bitfield, - custody_bitfield=attestation.custody_bitfield, - inclusion_slot=state.slot - ) - if target_epoch == get_current_epoch(state): - state.current_epoch_attestations.append(pending_attestation) - else: - state.previous_epoch_attestations.append(pending_attestation) - - -def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: - """ - Process ``VoluntaryExit`` transaction. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[exit.validator_index] - # Verify the validator is active - assert is_active_validator(validator, get_current_epoch(state)) - # Verify the validator has not yet exited - assert validator.exit_epoch == FAR_FUTURE_EPOCH - # Verify the validator has not initiated an exit - assert validator.initiated_exit is False - # Exits must specify an epoch when they become valid; they are not valid before then - assert get_current_epoch(state) >= exit.epoch - # Verify the validator has been active long enough - assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD - # Verify signature - assert bls_verify( - pubkey=validator.pubkey, - message_hash=signed_root(exit), - signature=exit.signature, - domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) - ) - # Initiate exit - initiate_validator_exit(state, exit.validator_index) - - -def process_transfer(state: BeaconState, transfer: Transfer) -> None: - """ - Process ``Transfer`` transaction. - Note that this function mutates ``state``. 
- """ - # Verify the amount and fee aren't individually too big (for anti-overflow purposes) - assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) - # Verify that we have enough ETH to send, and that after the transfer the balance will be either - # exactly zero or at least MIN_DEPOSIT_AMOUNT - assert ( - get_balance(state, transfer.sender) == transfer.amount + transfer.fee or - get_balance(state, transfer.sender) >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT - ) - # A transfer is valid in only one slot - assert state.slot == transfer.slot - # Only withdrawn or not-yet-deposited accounts can transfer - assert ( - get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or - state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH - ) - # Verify that the pubkey is valid - assert ( - state.validator_registry[transfer.sender].withdrawal_credentials == - BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:] - ) - # Verify that the signature is valid - assert bls_verify( - pubkey=transfer.pubkey, - message_hash=signed_root(transfer), - signature=transfer.signature, - domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) - ) - # Process the transfer - decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) - increase_balance(state, transfer.recipient, transfer.amount) - increase_balance(state, get_beacon_proposer_index(state, state.slot), transfer.fee) - - -def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: - assert block.state_root == hash_tree_root(state) - -# Monkey patch validator get committee code -_compute_committee = compute_committee -committee_cache = {} - - -def compute_committee(validator_indices: List[ValidatorIndex], - seed: Bytes32, - index: int, - total_committees: int) -> List[ValidatorIndex]: - - param_hash = (hash_tree_root(validator_indices), seed, index, total_committees) - - if param_hash in committee_cache: - # print("Cache hit, epoch={0}".format(epoch)) - return committee_cache[param_hash] - else: - # print("Cache miss, epoch={0}".format(epoch)) - ret = _compute_committee(validator_indices, seed, index, total_committees) - committee_cache[param_hash] = ret - return ret - - -# Monkey patch hash cache -_hash = hash -hash_cache = {} - - -def hash(x): - if x in hash_cache: - return hash_cache[x] - else: - ret = _hash(x) - hash_cache[x] = ret - return ret - \ No newline at end of file diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index 64ae482fd..6d57e5916 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -3,7 +3,7 @@ from distutils.core import setup setup( name='pyspec', version='1.0', - packages=['eth2'], + packages=['eth2.debug', 'eth2.phase0', 'eth2.utils'], install_requires=[ "eth-utils>=1.3.0,<2", "eth-typing>=2.1.0,<3.0.0", From 05970c93831ca3b0db08e265355335ec4791ae25 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 23:10:16 +0800 Subject: [PATCH 129/481] fix pyspec setup, and update usages: py_tests and in docs --- py_tests/README.md | 3 ++- py_tests/conftest.py | 0 .../block_processing/test_process_attestation.py | 2 +- .../block_processing/test_process_block_header.py | 2 +- .../phase0/block_processing/test_process_deposit.py | 2 +- .../test_process_proposer_slashing.py | 2 +- .../phase0/block_processing/test_voluntary_exit.py | 2 +- py_tests/requirements.txt | 2 +- test_generators/README.md | 11 ++++++++--- {py_tests => 
test_libs/pyspec/eth2}/__init__.py | 0 test_libs/pyspec/setup.py | 5 ++--- 11 files changed, 18 insertions(+), 13 deletions(-) delete mode 100644 py_tests/conftest.py rename {py_tests => test_libs/pyspec/eth2}/__init__.py (100%) diff --git a/py_tests/README.md b/py_tests/README.md index 659e737f1..86e2b541d 100644 --- a/py_tests/README.md +++ b/py_tests/README.md @@ -13,9 +13,10 @@ From within the py_tests folder: Install dependencies: ```bash python3 -m venv venv -. py_tests/venv/bin/activate +. venv/bin/activate pip3 install -r requirements.txt ``` +Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement. Run the tests: ``` diff --git a/py_tests/conftest.py b/py_tests/conftest.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/py_tests/phase0/block_processing/test_process_attestation.py b/py_tests/phase0/block_processing/test_process_attestation.py index f76ca77d8..ba2754092 100644 --- a/py_tests/phase0/block_processing/test_process_attestation.py +++ b/py_tests/phase0/block_processing/test_process_attestation.py @@ -11,7 +11,7 @@ from eth2.phase0.spec import ( process_attestation, slot_to_epoch, ) -from ..helpers import ( +from phase0.helpers import ( build_empty_block_for_next_slot, get_valid_attestation, ) diff --git a/py_tests/phase0/block_processing/test_process_block_header.py b/py_tests/phase0/block_processing/test_process_block_header.py index ef902bb72..0c4a930c2 100644 --- a/py_tests/phase0/block_processing/test_process_block_header.py +++ b/py_tests/phase0/block_processing/test_process_block_header.py @@ -8,7 +8,7 @@ from eth2.phase0.spec import ( advance_slot, process_block_header, ) -from ..helpers import ( +from phase0.helpers import ( build_empty_block_for_next_slot, ) diff --git a/py_tests/phase0/block_processing/test_process_deposit.py b/py_tests/phase0/block_processing/test_process_deposit.py index 19467984e..bda014665 100644 --- a/py_tests/phase0/block_processing/test_process_deposit.py +++ b/py_tests/phase0/block_processing/test_process_deposit.py @@ -8,7 +8,7 @@ from eth2.phase0.spec import ( ZERO_HASH, process_deposit, ) -from ..helpers import ( +from phase0.helpers import ( build_deposit, privkeys, pubkeys, diff --git a/py_tests/phase0/block_processing/test_process_proposer_slashing.py b/py_tests/phase0/block_processing/test_process_proposer_slashing.py index 761624c25..b172cd988 100644 --- a/py_tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/py_tests/phase0/block_processing/test_process_proposer_slashing.py @@ -7,7 +7,7 @@ from eth2.phase0.spec import ( get_current_epoch, process_proposer_slashing, ) -from ..helpers import ( +from phase0.helpers import ( get_valid_proposer_slashing, ) diff --git a/py_tests/phase0/block_processing/test_voluntary_exit.py b/py_tests/phase0/block_processing/test_voluntary_exit.py index 45a8af1bb..32c747024 100644 --- a/py_tests/phase0/block_processing/test_voluntary_exit.py +++ b/py_tests/phase0/block_processing/test_voluntary_exit.py @@ -8,7 +8,7 @@ from eth2.phase0.spec import ( get_current_epoch, process_voluntary_exit, ) -from ..helpers import ( +from phase0.helpers import ( build_voluntary_exit, pubkey_to_privkey, ) diff --git a/py_tests/requirements.txt b/py_tests/requirements.txt index d18b29127..27b3f22d8 100644 --- a/py_tests/requirements.txt +++ b/py_tests/requirements.txt @@ -4,4 +4,4 @@ oyaml==0.7 pycryptodome==3.7.3 py_ecc>=1.6.0 pytest>=3.6,<3.7 -../pyspec +../test_libs/pyspec diff --git a/test_generators/README.md 
b/test_generators/README.md index 61dc96063..155059fe0 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -46,9 +46,9 @@ Simply open up the generator (not all at once) of choice in your favorite IDE/ed ```bash # Create a virtual environment (any venv/.venv/.venvs is git-ignored) -python3 -m venv .venv +python3 -m venv venv # Activate the venv, this is where dependencies are installed for the generator -. .venv/bin/activate +. venv/bin/activate ``` Now that you have a virtual environment, write your generator. @@ -57,8 +57,13 @@ It's recommended to extend the base-generator. Create a `requirements.txt` in the root of your generator directory: ``` eth-utils==1.4.1 -../test_libs/gen_helpers +../../test_libs/gen_helpers ``` +And optionally, to include pyspec, add: +``` +../../test_libs/pyspec +``` +Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement. Install all the necessary requirements (re-run when you add more): ```bash diff --git a/py_tests/__init__.py b/test_libs/pyspec/eth2/__init__.py similarity index 100% rename from py_tests/__init__.py rename to test_libs/pyspec/eth2/__init__.py diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index 6d57e5916..b04847d37 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -1,9 +1,8 @@ -from distutils.core import setup +from setuptools import setup, find_packages setup( name='pyspec', - version='1.0', - packages=['eth2.debug', 'eth2.phase0', 'eth2.utils'], + packages=find_packages(), install_requires=[ "eth-utils>=1.3.0,<2", "eth-typing>=2.1.0,<3.0.0", From 58f09b20f5c9b8bf75b486b8aefaa5f7b5384919 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 23:23:36 +0800 Subject: [PATCH 130/481] update CI, makefile has test runner now --- .circleci/config.yml | 8 +++----- Makefile | 5 ++--- py_tests/README.md | 6 ++++++ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 411eb9230..efe4d0723 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,14 +8,12 @@ jobs: steps: - checkout - run: - name: Build phase0 spec - command: make phase0 + name: Build pyspec + command: make pyspec - run: name: run py-tests - command: | - . venv/bin/activate - pytest tests + command: make test - run: name: Generate YAML tests diff --git a/Makefile b/Makefile index 84dbd0c26..65839254a 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ SPEC_DIR = ./specs SCRIPT_DIR = ./scripts TEST_LIBS_DIR = ./test_libs PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec - +PY_TEST_DIR = ./py_tests YAML_TEST_DIR = ./yaml_tests GENERATOR_DIR = ./test_generators GENERATOR_VENVS_DIR = $(GENERATOR_DIR)/.venvs @@ -29,9 +29,8 @@ clean: yaml_tests: $(YAML_TEST_TARGETS) # runs a limited set of tests against a minimal config -# run pytest with `-m` option to full suite test: $(PY_SPEC_TARGETS) - pytest -m minimal_config tests/ + cd $(PY_TEST_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; pytest -m minimal_config . # "make pyspec" to create the pyspec for all phases. pyspec: $(PY_SPEC_TARGETS) diff --git a/py_tests/README.md b/py_tests/README.md index 86e2b541d..ca2bed4cc 100644 --- a/py_tests/README.md +++ b/py_tests/README.md @@ -8,6 +8,12 @@ There are ideas to port these tests to the YAML test suite, ## How to run tests +### Automated + +Run `make test` from the root of the spec repository. 
+ +### Manual + From within the py_tests folder: Install dependencies: From f2703bc8d3bd43b1dcfeafbb49b462d499409809 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 28 Mar 2019 23:27:28 +0800 Subject: [PATCH 131/481] update readme with example pyspec import --- test_generators/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test_generators/README.md b/test_generators/README.md index 155059fe0..e72f9caf4 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -109,6 +109,12 @@ if __name__ == "__main__": ``` +And to use the pyspec: + +``` +from eth2.phase0 import spec +``` + Recommendations: - you can have more than just 1 generator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])` - you can concatenate lists of test cases, if you don't want to split it up in suites. From deb0e32590fed25280e6923b8d9054b941beccb7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 28 Mar 2019 10:51:36 -0500 Subject: [PATCH 132/481] Fixes to make Justin happy --- specs/core/0_beacon-chain.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 14b4f566a..120aa89d2 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1309,10 +1309,10 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: validator = Validator( pubkey=pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, + activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - initiated_exit=False, slashed=False, high_balance=0 ) @@ -1973,7 +1973,7 @@ def process_ejections(state: BeaconState) -> None: Iterate through the validator registry and deposit or eject active validators with sufficiently high or low balances """ - for index, validator in enumeratE(state.validator_registry): + for index, validator in enumerate(state.validator_registry): if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: state.activation_eligibility_epoch = get_current_epoch(state) if is_active(validator, get_current_epoch(state)) and get_balance(state, index) < EJECTION_BALANCE: @@ -2284,8 +2284,6 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: assert is_active_validator(validator, get_current_epoch(state)) # Verify the validator has not yet exited assert validator.exit_epoch == FAR_FUTURE_EPOCH - # Verify the validator has not initiated an exit - assert validator.initiated_exit is False # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch # Verify the validator has been active long enough From adf91f50c579a0a603add2270a956b3f543b500c Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 29 Mar 2019 00:05:40 +0800 Subject: [PATCH 133/481] run yaml test generators from makefile --- Makefile | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 65839254a..566db0a7a 100644 --- a/Makefile +++ b/Makefile @@ -5,12 +5,12 @@ PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec PY_TEST_DIR = ./py_tests YAML_TEST_DIR = ./yaml_tests GENERATOR_DIR = ./test_generators -GENERATOR_VENVS_DIR = $(GENERATOR_DIR)/.venvs # Collect a list of generator names GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/))) # Map this list of generator paths to a list of test output paths 
YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS)) +GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%/venv, $(GENERATORS)) PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2/phase0/spec.py PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) @@ -22,11 +22,12 @@ all: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) $(PY_SPEC_ALL_TARGETS) clean: rm -rf $(YAML_TEST_DIR) - rm -rf $(GENERATOR_VENVS_DIR) + rm -rf $(GENERATOR_VENVS) + rm -rf $(PY_TEST_DIR)/venv rm -rf $(PY_SPEC_ALL_TARGETS) -# "make yaml_tests" to run generators -yaml_tests: $(YAML_TEST_TARGETS) +# "make gen_yaml_tests" to run generators +gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) # runs a limited set of tests against a minimal config test: $(PY_SPEC_TARGETS) @@ -43,6 +44,7 @@ $(PY_SPEC_DIR)/eth2/phase0/spec.py: python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@ +CURRENT_DIR = ${CURDIR} # The function that builds a set of suite files, by calling a generator for the given type (param 1) define build_yaml_tests @@ -50,22 +52,19 @@ define build_yaml_tests # Create the output mkdir -p $(YAML_TEST_DIR)$(1) - # Create a virtual environment - python3 -m venv $(VENV_DIR)$(1) - # Activate the venv, this is where dependencies are installed for the generator - . $(GENERATOR_VENVS_DIR)$(1)bin/activate - # Install all the necessary requirements - pip3 install -r $(GENERATOR_DIR)$(1)requirements.txt + # 1) Create a virtual environment + # 2) Activate the venv, this is where dependencies are installed for the generator + # 3) Install all the necessary requirements + # 4) Run the generator. The generator is assumed to have an "main.py" file. + # 5) We output to the tests dir (generator program should accept a "-o " argument. + cd $(GENERATOR_DIR)$(1); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) - # Run the generator. The generator is assumed to have an "main.py" file. - # We output to the tests dir (generator program should accept a "-p " argument. - python3 $(GENERATOR_DIR)$(1)main.py -o $(YAML_TEST_DIR)$(1) $(info generator $(1) finished) endef # The tests dir itself is simply build by creating the directory (recursively creating deeper directories if necessary) $(YAML_TEST_DIR): - $(info ${YAML_TEST_TARGETS}) + $(info creating directory, to output yaml targets to: ${YAML_TEST_TARGETS}) mkdir -p $@ # For any target within the tests dir, build it using the build_yaml_tests function. 
From 04d41ddabeb5e7d29b0ab38c75ce0f7331d6ec1d Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 29 Mar 2019 00:16:18 +0800 Subject: [PATCH 134/481] small fixes/tweaks, pytests and yaml generators work well now --- .circleci/config.yml | 4 ++-- Makefile | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index efe4d0723..c347a064f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,12 +12,12 @@ jobs: command: make pyspec - run: - name: run py-tests + name: Run py-tests command: make test - run: name: Generate YAML tests - command: make yaml_tests + command: make gen_yaml_tests - store_artifacts: path: test-reports diff --git a/Makefile b/Makefile index 566db0a7a..ca7321c22 100644 --- a/Makefile +++ b/Makefile @@ -10,13 +10,13 @@ GENERATOR_DIR = ./test_generators GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/))) # Map this list of generator paths to a list of test output paths YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS)) -GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%/venv, $(GENERATORS)) +GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS)) PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2/phase0/spec.py PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) -.PHONY: clean all test yaml_tests pyspec phase0 +.PHONY: clean all test gen_yaml_tests pyspec phase0 all: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) $(PY_SPEC_ALL_TARGETS) @@ -30,11 +30,11 @@ clean: gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) # runs a limited set of tests against a minimal config -test: $(PY_SPEC_TARGETS) +test: $(PY_SPEC_ALL_TARGETS) cd $(PY_TEST_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; pytest -m minimal_config . # "make pyspec" to create the pyspec for all phases. -pyspec: $(PY_SPEC_TARGETS) +pyspec: $(PY_SPEC_ALL_TARGETS) # "make phase0" to create pyspec for phase0 phase0: $(PY_SPEC_PHASE_0_TARGETS) From 3b132b71aabdaf695eedbaee706ed7d9e3d5d019 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 29 Mar 2019 00:24:18 +0800 Subject: [PATCH 135/481] refine makefile, update generator doc --- Makefile | 3 ++- test_generators/README.md | 18 +++++------------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index ca7321c22..93c2e49fa 100644 --- a/Makefile +++ b/Makefile @@ -68,5 +68,6 @@ $(YAML_TEST_DIR): mkdir -p $@ # For any target within the tests dir, build it using the build_yaml_tests function. -$(YAML_TEST_DIR)%: +# (creation of output dir is a dependency) +$(YAML_TEST_DIR)%: $(YAML_TEST_DIR) $(call build_yaml_tests,$*) diff --git a/test_generators/README.md b/test_generators/README.md index e72f9caf4..f4dca7977 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -17,7 +17,7 @@ pre-requisites: ### Cleaning -This removes the existing virtual environments (`/test_generators/.venvs/`), and generated tests (`/yaml_tests/`). +This removes the existing virtual environments (`/test_generators//venv`), and generated tests (`/yaml_tests/`). ```bash make clean @@ -28,7 +28,7 @@ make clean This runs all the generators. ```bash -make all +make gen_yaml_tests ``` ### Running a single generator @@ -37,7 +37,7 @@ The make file auto-detects generators in the `test_generators/` directory, and provides a tests-gen target for each generator, see example. 
```bash -make ./tests/shuffling/ +make ./yaml_tests/shuffling/ ``` ## Developing a generator @@ -45,6 +45,7 @@ make ./tests/shuffling/ Simply open up the generator (not all at once) of choice in your favorite IDE/editor, and run: ```bash +# From the root of the generator directory: # Create a virtual environment (any venv/.venv/.venvs is git-ignored) python3 -m venv venv # Activate the venv, this is where dependencies are installed for the generator @@ -143,20 +144,11 @@ Note: you do not have to change the makefile. However, if necessary (e.g. not using python, or mixing in other languages), submit an issue, and it can be a special case. Do note that generators should be easy to maintain, lean, and based on the spec. -All of this should be done in a pull request to the master branch. - -To deploy new tests to the testing repository: - -1. Create a release tag with a new version number on Github. -2. Increment either the: - - major version, to indicate a change in the general testing format - - minor version, if a new test generator has been added - - path version, in other cases. ## How to remove a test generator If a test generator is not needed anymore, undo the steps described above and make a new release: -1. remove the generator folder +1. remove the generator directory 2. remove the generated tests in the `eth2.0-tests` repository by opening a PR there. 3. make a new release From aa4bbcc1c82ba9435cdee00c57fca66268fdb229 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 29 Mar 2019 00:43:28 +0800 Subject: [PATCH 136/481] Bugfix --- specs/core/0_beacon-chain.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 120aa89d2..094206f13 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -596,7 +596,7 @@ The types are defined topologically to aid in facilitating an executable version # Exit queue 'exit_epoch': 'uint64', - 'exit_queue_filled': 'uint64' + 'exit_queue_filled': 'uint64', # Finality 'previous_epoch_attestations': [PendingAttestation], @@ -1974,9 +1974,10 @@ def process_ejections(state: BeaconState) -> None: and deposit or eject active validators with sufficiently high or low balances """ for index, validator in enumerate(state.validator_registry): + balance = get_balance(state, index) if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: state.activation_eligibility_epoch = get_current_epoch(state) - if is_active(validator, get_current_epoch(state)) and get_balance(state, index) < EJECTION_BALANCE: + if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: initiate_validator_exit(state, index) ``` @@ -1986,12 +1987,13 @@ Run the following function: ```python def update_registry(state: BeaconState) -> None: + current_epoch = get_current_epoch(state) # Check if we should update, and if so, update if state.finalized_epoch > state.validator_registry_update_epoch: # Validator indices that could be activated indices_for_activation = sorted( filter( - lambda index: state.validator_registry[index].activation_epoch == FAR_FUTURE_EPOCH + lambda index: state.validator_registry[index].activation_epoch == FAR_FUTURE_EPOCH, get_active_validator_indices(state.validator_registry, current_epoch), ), key=lambda index: state.validator_registry[index].activation_eligibility_epoch From ec37645e8f16114b03700438511934f7b692da6f Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 29 Mar 2019 00:51:49 
+0800 Subject: [PATCH 137/481] update pyspec readme --- test_libs/pyspec/README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index 25ee737f7..ee6bc5325 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -7,9 +7,19 @@ With this executable spec, test-generators can easily create test-vectors for client implementations, and the spec itself can be verified to be consistent and coherent, through sanity tests implemented with pytest. +## Building + +All the dynamic parts of the spec can be build at once with `make pyspec`. + +Alternatively, you can build a sub-set of the pyspec: `make phase0`. + +Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2/phase0/spec.py` + +## Contributing + Contributions are welcome, but consider implementing your idea as part of the spec itself first. The pyspec is not a replacement. -If you see opportunity to include any of the `utils/` code in the spec, +If you see opportunity to include any of the `eth2/utils/` code in the spec, please submit an issue or PR. ## License From 1f657cfec50b1c41e53a9183193047fc420d3d8d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 11:26:04 -0600 Subject: [PATCH 138/481] remove custody_bitfield from indexedattestation. add two separate arrays for 0 and 1 bit --- specs/core/0_beacon-chain.md | 45 +++++++++++++----------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0bdfafb79..057772293 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -377,11 +377,10 @@ The types are defined topologically to aid in facilitating an executable version ```python { # Validator indices - 'validator_indices': ['uint64'], + 'custody_bit_0_indices': ['uint64'], + 'custody_bit_1_indices': ['uint64'], # Attestation data 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', # Aggregate signature 'aggregate_signature': 'bytes96', } @@ -1060,7 +1059,7 @@ def get_attestation_participants(state: BeaconState, attestation_data: AttestationData, bitfield: bytes) -> List[ValidatorIndex]: """ - Return the participant indices corresponding to ``attestation_data`` and ``bitfield``. + Return the sorted participant indices corresponding to ``attestation_data`` and ``bitfield``. """ crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) @@ -1072,7 +1071,7 @@ def get_attestation_participants(state: BeaconState, aggregation_bit = get_bitfield_bit(bitfield, i) if aggregation_bit == 0b1: participants.append(validator_index) - return participants + return sorted(participants) ``` ### `int_to_bytes1`, `int_to_bytes2`, ... 
@@ -1184,20 +1183,13 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation): Convert an attestation to (almost) indexed-verifiable form """ attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - - # reconstruct custody bitfield for the truncated attesting_indices custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) - custody_bitfield = b'\x00' * ((len(attesting_indices) + 7) // 8) - - crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data) - for i, validator_index in enumerate(crosslink_committee): - if get_bitfield_bit(attestation.custody_bitfield, i): - custody_bitfield = set_bitfield_bit(custody_bitfield, attesting_indices.index(validator_index)) + custody_bit_0_indices = [index for index in attesting_indices if index not in custody_bit_1_indices] return IndexedAttestation( - validator_indices=attesting_indices, + custody_bit_0_indices=custody_bit_0_indices, + custody_bit_1_indices=custody_bit_1_indices, data=attestation.data, - custody_bitfield=custody_bitfield, aggregate_signature=attestation.aggregate_signature ) ``` @@ -1209,22 +1201,21 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA """ Verify validity of ``indexed_attestation`` fields. """ - if indexed_attestation.custody_bitfield != b'\x00' * len(indexed_attestation.custody_bitfield): # [TO BE REMOVED IN PHASE 1] + custody_bit_0_indices = indexed_attestation.custody_bit_0_indices + custody_bit_1_indices = indexed_attestation.custody_bit_1_indices + + if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1] return False - if not (1 <= len(indexed_attestation.validator_indices) <= MAX_ATTESTATION_PARTICIPANTS): + total_attesting_indices = len(custody_bit_0_indices + custody_bit_1_indices) + if not (1 <= total_attesting_indices <= MAX_ATTESTATION_PARTICIPANTS): return False - if not verify_bitfield(indexed_attestation.custody_bitfield, len(indexed_attestation.validator_indices)): + if custody_bit_0_indices != sorted(custody_bit_0_indices): return False - custody_bit_0_indices = [] - custody_bit_1_indices = [] - for i, validator_index in enumerate(indexed_attestation.validator_indices): - if get_bitfield_bit(indexed_attestation.custody_bitfield, i) == 0b0: - custody_bit_0_indices.append(validator_index) - else: - custody_bit_1_indices.append(validator_index) + if custody_bit_1_indices != sorted(custody_bit_1_indices): + return False return bls_verify_multiple( pubkeys=[ @@ -2355,10 +2346,6 @@ def process_attester_slashing(state: BeaconState, is_surround_vote(attestation1.data, attestation2.data) ) - # check that indices are sorted - assert attestation1.validator_indices == sorted(attestation1.validator_indices) - assert attestation2.validator_indices == sorted(attestation2.validator_indices) - assert verify_indexed_attestation(state, attestation1) assert verify_indexed_attestation(state, attestation2) slashable_indices = [ From ba47a8f4c44adebf613f5507ca48d022141a389c Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 11:28:38 -0600 Subject: [PATCH 139/481] remove unused set_bitfield_bit hlper --- specs/core/0_beacon-chain.md | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 057772293..8363d9b22 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -86,7 +86,6 @@ - [`get_fork_version`](#get_fork_version) - 
[`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - - [`set_bitfield_bit`](#set_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) - [`convert_to_indexed`](#convert_to_indexed) - [`verify_indexed_attestation`](#verify_indexed_attestation) @@ -1141,22 +1140,6 @@ def get_bitfield_bit(bitfield: bytes, i: int) -> int: return (bitfield[i // 8] >> (i % 8)) % 2 ``` -### `set_bitfield_bit` - -```python -def set_bitfield_bit(bitfield: bytes, i: int) -> int: - """ - Set the bit in ``bitfield`` at position ``i`` to ``1``. - """ - byte_index = i // 8 - bit_index = i % 8 - return ( - bitfield[:byte_index] + - bytes([bitfield[byte_index] | (1 << bit_index)]) + - bitfield[byte_index+1:] - ) -``` - ### `verify_bitfield` ```python From eb229089c842cac0445ab5393fe04b28c552b0ce Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 11:31:12 -0600 Subject: [PATCH 140/481] lint --- tests/phase0/helpers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 08ea6ca04..e5e335d80 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -280,7 +280,6 @@ def get_valid_attestation(state, slot=None): ) ) - attestation.aggregation_signature = bls.aggregate_signatures(signatures) return attestation From 66d5026ffe53e3473ee5f1bd3b3d81a5a8f316e8 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 28 Mar 2019 13:15:38 -0600 Subject: [PATCH 141/481] minor copy edit --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index cf6527ad1..3a4de1973 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1573,7 +1573,7 @@ For a beacon chain block, `block`, to be processed by a node, the following cond If these conditions are not met, the client should delay processing the beacon block until the conditions are all satisfied. -Beacon block production is significantly different because of the proof-of-stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this dynamic requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. +Beacon block production is significantly different because of the proof-of-stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. ### Beacon chain fork choice rule From 1082c68fef660fc66f078ad5442a0065e03e3e71 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 28 Mar 2019 22:54:39 +0000 Subject: [PATCH 142/481] Separate document for phase 1 custody game (#818) The 1-round custody game has been implemented. Many bugs squashed, and a bunch of polishing done. Miscellaneous known issues (~8 of them) to be resolved in separate, smaller, PRs. 
--- specs/core/1_custody-game.md | 499 +++++++++++++++++++++++++++++++++++ 1 file changed, 499 insertions(+) create mode 100644 specs/core/1_custody-game.md diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md new file mode 100644 index 000000000..fd754634e --- /dev/null +++ b/specs/core/1_custody-game.md @@ -0,0 +1,499 @@ +# Ethereum 2.0 Phase 1 -- Custody Game + +**NOTICE**: This spec is a work-in-progress for researchers and implementers. + +## Table of contents + + + +- [Ethereum 2.0 Phase 1 -- Custody Game](#ethereum-20-phase-1----custody-game) + - [Table of contents](#table-of-contents) + - [Introduction](#introduction) + - [Terminology](#terminology) + - [Constants](#constants) + - [Misc](#misc) + - [Time parameters](#time-parameters) + - [Max transactions per block](#max-transactions-per-block) + - [Signature domains](#signature-domains) + - [Data structures](#data-structures) + - [Custody objects](#custody-objects) + - [`CustodyChunkChallenge`](#custodychunkchallenge) + - [`CustodyBitChallenge`](#custodybitchallenge) + - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord) + - [`CustodyBitChallengeRecord`](#custodybitchallengerecord) + - [`CustodyResponse`](#custodyresponse) + - [`CustodyKeyReveal`](#custodykeyreveal) + - [Phase 0 container updates](#phase-0-container-updates) + - [`Validator`](#validator) + - [`BeaconState`](#beaconstate) + - [`BeaconBlockBody`](#beaconblockbody) + - [Helpers](#helpers) + - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) + - [`get_custody_chunk_bit`](#get_custody_chunk_bit) + - [`epoch_to_custody_period`](#epoch_to_custody_period) + - [`verify_custody_key`](#verify_custody_key) + - [Per-block processing](#per-block-processing) + - [Transactions](#transactions) + - [Custody reveals](#custody-reveals) + - [Chunk challenges](#chunk-challenges) + - [Bit challenges](#bit-challenges) + - [Custody responses](#custody-responses) + - [Per-epoch processing](#per-epoch-processing) + + + +## Introduction + +This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [phase 0](0_beacon-chain.md) specification. 
+ +## Terminology + +* **Custody game**: +* **Custody period**: +* **Custody chunk**: +* **Custody chunk bit**: +* **Custody chunk challenge**: +* **Custody bit**: +* **Custody bit challenge**: +* **Custody key**: +* **Custody key reveal**: +* **Custody key mask**: +* **Custody response**: +* **Custody response deadline**: + +## Constants + +### Misc + +| Name | Value | +| - | - | +| `BYTES_PER_SHARD_BLOCK` | `2**14` (= 16,384) | +| `BYTES_PER_CUSTODY_CHUNK` | `2**9` (= 512) | +| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) | + +### Time parameters + +| Name | Value | Unit | Duration | +| - | - | :-: | :-: | +| `MAX_CHUNK_CHALLENGE_DELAY` | `2**11` (= 2,048) | epochs | ~9 days | +| `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | +| `CUSTODY_RESPONSE_DEADLINE` | `2**14` (= 16,384) | epochs | ~73 days | + +### Max transactions per block + +| Name | Value | +| - | - | +| `MAX_CUSTODY_KEY_REVEALS` | `2**4` (= 16) | +| `MAX_CUSTODY_CHUNK_CHALLENGES` | `2**2` (= 4) | +| `MAX_CUSTODY_BIT_CHALLENGES` | `2**2` (= 4) | +| `MAX_CUSTODY_RESPONSES` | `2**5` (= 32) | + +### Signature domains + +| Name | Value | +| - | - | +| `DOMAIN_CUSTODY_KEY_REVEAL` | `6` | +| `DOMAIN_CUSTODY_BIT_CHALLENGE` | `7` | + +## Data structures + +### Custody objects + +#### `CustodyChunkChallenge` + +```python +{ + 'responder_index': ValidatorIndex, + 'attestation': Attestation, + 'chunk_index': 'uint64', +} +``` + +#### `CustodyBitChallenge` + +```python +{ + 'responder_index': ValidatorIndex, + 'attestation': Attestation, + 'challenger_index': ValidatorIndex, + 'responder_key': BLSSignature, + 'chunk_bits': Bitfield, + 'signature': BLSSignature, +} +``` + +#### `CustodyChunkChallengeRecord` + +```python +{ + 'challenge_index': 'uint64', + 'challenger_index': ValidatorIndex, + 'responder_index': ValidatorIndex, + 'deadline': Epoch, + 'crosslink_data_root': Hash, + 'depth': 'uint64', + 'chunk_index': 'uint64', +} +``` + +#### `CustodyBitChallengeRecord` + +```python +{ + 'challenge_index': 'uint64', + 'challenger_index': ValidatorIndex, + 'responder_index': ValidatorIndex, + 'deadline': Epoch, + 'crosslink_data_root': Hash, + 'chunk_bits': Bitfield, + 'responder_key': BLSSignature, +} +``` + +#### `CustodyResponse` + +```python +{ + 'challenge_index': 'uint64', + 'chunk_index': 'uint64', + 'chunk': ['byte', BYTES_PER_CUSTODY_CHUNK], + 'branch': [Hash], +} +``` + +#### `CustodyKeyReveal` + +```python +{ + 'revealer_index': ValidatorIndex, + 'period': 'uint64', + 'key': BLSSignature, + 'masker_index': ValidatorIndex, + 'mask': Hash, +} +``` + +### Phase 0 container updates + +Add the following fields to the end of the specified container objects. Fields with underlying type `uint64` are initialized to `0` and list fields are initialized to `[]`. 
+ +#### `Validator` + +```python + 'custody_reveal_index': 'uint64', + 'max_reveal_lateness': 'uint64', +``` + +#### `BeaconState` + +```python + 'custody_chunk_challenge_records': [CustodyChunkChallengeRecord], + 'custody_bit_challenge_records': [CustodyBitChallengeRecord], + 'custody_challenge_index': 'uint64', +``` + +#### `BeaconBlockBody` + +```python + 'custody_key_reveals': [CustodyKeyReveal], + 'custody_chunk_challenges': [CustodyChunkChallenge], + 'custody_bit_challenges': [CustodyBitChallenge], + 'custody_responses': [CustodyResponse], +``` + +## Helpers + +### `get_crosslink_chunk_count` + +```python +def get_custody_chunk_count(attestation: Attestation) -> int: + crosslink_start_epoch = attestation.data.latest_crosslink.epoch + crosslink_end_epoch = slot_to_epoch(attestation.data.slot) + crosslink_crosslink_length = min(MAX_CROSSLINK_EPOCHS, end_epoch - start_epoch) + chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK + return crosslink_crosslink_length * chunks_per_epoch +``` + +### `get_custody_chunk_bit` + +```python +def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool: + # TODO: Replace with something MPC-friendly, e.g. the Legendre symbol + return get_bitfield_bit(hash(challenge.responder_key + chunk), 0) +``` + +### `epoch_to_custody_period` + +```python +def epoch_to_custody_period(epoch: Epoch) -> int: + return epoch // EPOCHS_PER_CUSTODY_PERIOD +``` + +### `verify_custody_key` + +```python +def verify_custody_key(state: BeaconState, reveal: CustodyKeyReveal) -> bool: + # Case 1: non-masked non-punitive non-early reveal + pubkeys = [state.validator_registry[reveal.revealer_index].pubkey] + message_hashes = [hash_tree_root(reveal.period)] + + # Case 2: masked punitive early reveal + # Masking prevents proposer stealing the whistleblower reward + # Secure under the aggregate extraction infeasibility assumption + # See pages 11-12 of https://crypto.stanford.edu/~dabo/pubs/papers/aggreg.pdf + if reveal.mask != ZERO_HASH: + pubkeys.append(state.validator_registry[reveal.masker_index].pubkey) + message_hashes.append(reveal.mask) + + return bls_verify_multiple( + pubkeys=pubkeys, + message_hashes=message_hashes, + signature=reveal.key, + domain=get_domain( + fork=state.fork, + epoch=reveal.period * EPOCHS_PER_CUSTODY_PERIOD, + domain_type=DOMAIN_CUSTODY_KEY_REVEAL, + ), + ) +``` + +## Per-block processing + +### Transactions + +Add the following transactions to the per-block processing, in order the given below and after all other transactions in phase 0. + +#### Custody reveals + +Verify that `len(block.body.custody_key_reveals) <= MAX_CUSTODY_KEY_REVEALS`. 
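For intuition about the reveal schedule enforced by `process_custody_reveal` below (illustrative values only, not part of the spec): with `EPOCHS_PER_CUSTODY_PERIOD = 2,048`, a validator activated at epoch 5,000 starts in custody period `5000 // 2048 = 2`, so its first non-punitive reveal (`custody_reveal_index = 0`) must be for period 2, its second for period 3, and so on.

```python
# Worked example with an illustrative activation epoch (not part of the spec):
# epoch_to_custody_period(5000) == 5000 // EPOCHS_PER_CUSTODY_PERIOD == 2
assert epoch_to_custody_period(5000) == 2
```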
+ +For each `reveal` in `block.body.custody_key_reveals`, run the following function: + +```python +def process_custody_reveal(state: BeaconState, + reveal: CustodyKeyReveal) -> None: + assert verify_custody_key(state, reveal) + revealer = state.validator_registry[reveal.revealer_index] + current_custody_period = epoch_to_custody_period(get_current_epoch(state)) + + # Case 1: non-masked non-punitive non-early reveal + if reveal.mask == ZERO_HASH: + assert reveal.period == epoch_to_custody_period(revealer.activation_epoch) + revealer.custody_reveal_index + # Revealer is active or exited + assert is_active_validator(revealer, get_current_epoch(state)) or revealer.exit_epoch > get_current_epoch(state) + revealer.custody_reveal_index += 1 + revealer.max_reveal_lateness = max(revealer.max_reveal_lateness, current_custody_period - reveal.period) + proposer_index = get_beacon_proposer_index(state, state.slot) + increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) + + # Case 2: masked punitive early reveal + else: + assert reveal.period > current_custody_period + assert revealer.slashed is False + slash_validator(state, reveal.revealer_index, reveal.masker_index) +``` + +#### Chunk challenges + +Verify that `len(block.body.custody_chunk_challenges) <= MAX_CUSTODY_CHUNK_CHALLENGES`. + +For each `challenge` in `block.body.custody_chunk_challenges`, run the following function: + +```python +def process_chunk_challenge(state: BeaconState, + challenge: CustodyChunkChallenge) -> None: + # Verify the attestation + assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation)) + # Verify it is not too late to challenge + assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY + responder = state.validator_registry[challenge.responder_index] + assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY + # Verify the responder participated in the attestation + attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + assert challenge.responder_index in attesters + # Verify the challenge is not a duplicate + for record in state.custody_chunk_challenge_records: + assert ( + record.crosslink_data_root != challenge.attestation.data.crosslink_data_root or + record.chunk_index != challenge.chunk_index + ) + # Verify depth + depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation))) + assert challenge.chunk_index < 2**depth + # Add new chunk challenge record + state.custody_chunk_challenge_records.append(CustodyChunkChallengeRecord( + challenge_index=state.custody_challenge_index, + challenger_index=get_beacon_proposer_index(state, state.slot), + responder_index=challenge.responder_index + deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE, + crosslink_data_root=challenge.attestation.data.crosslink_data_root, + depth=depth, + chunk_index=challenge.chunk_index, + )) + state.custody_challenge_index += 1 + # Postpone responder withdrawability + responder.withdrawable_epoch = FAR_FUTURE_EPOCH +``` + +#### Bit challenges + +Verify that `len(block.body.custody_bit_challenges) <= MAX_CUSTODY_BIT_CHALLENGES`. 
+ +For each `challenge` in `block.body.custody_bit_challenges`, run the following function: + +```python +def process_bit_challenge(state: BeaconState, + challenge: CustodyBitChallenge) -> None: + # Verify challenge signature + challenger = state.validator_registry[challenge.challenger_index] + assert bls_verify( + pubkey=challenger.pubkey, + message_hash=signed_root(challenge), + signature=challenge.signature, + domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_BIT_CHALLENGE), + ) + # Verify the challenger is not slashed + assert challenger.slashed is False + # Verify the attestation + assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation)) + # Verify the attestation is eligible for challenging + responder = state.validator_registry[challenge.responder_index] + min_challengeable_epoch = responder.exit_epoch - EPOCHS_PER_CUSTODY_PERIOD * (1 + responder.max_reveal_lateness) + assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot) + # Verify the responder participated in the attestation + attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + assert challenge.responder_index in attesters + # A validator can be the challenger or responder for at most one challenge at a time + for record in state.custody_bit_challenge_records: + assert record.challenger_index != challenge.challenger_index + assert record.responder_index != challenge.responder_index + # Verify the responder key + assert verify_custody_key(state, CustodyKeyReveal( + revealer_index=challenge.responder_index, + period=epoch_to_custody_period(slot_to_epoch(attestation.data.slot)), + key=challenge.responder_key, + masker_index=0, + mask=ZERO_HASH, + )) + # Verify the chunk count + chunk_count = get_custody_chunk_count(challenge.attestation) + assert verify_bitfield(challenge.chunk_bits, chunk_count) + # Verify the xor of the chunk bits does not equal the custody bit + chunk_bits_xor = 0b0 + for i in range(chunk_count): + chunk_bits_xor ^ get_bitfield_bit(challenge.chunk_bits, i) + custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index)) + assert custody_bit != chunk_bits_xor + # Add new bit challenge record + state.custody_bit_challenge_records.append(CustodyBitChallengeRecord( + challenge_index=state.custody_challenge_index, + challenger_index=challenge.challenger_index, + responder_index=challenge.responder_index, + deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE + crosslink_data_root=challenge.attestation.crosslink_data_root, + chunk_bits=challenge.chunk_bits, + responder_key=challenge.responder_key, + )) + state.custody_challenge_index += 1 + # Postpone responder withdrawability + responder.withdrawable_epoch = FAR_FUTURE_EPOCH +``` + +#### Custody responses + +Verify that `len(block.body.custody_responses) <= MAX_CUSTODY_RESPONSES`. 
+ +For each `response` in `block.body.custody_responses`, run the following function: + +```python +def process_custody_response(state: BeaconState, + response: CustodyResponse) -> None: + chunk_challenge = next(record for record in state.custody_chunk_challenge_records if record.challenge_index == response.challenge_index, None) + if chunk_challenge is not None: + return process_chunk_challenge_response(state, response, chunk_challenge) + + bit_challenge = next(record for record in state.custody_bit_challenge_records if record.challenge_index == response.challenge_index, None) + if bit_challenge is not None: + return process_bit_challenge_response(state, response, bit_challenge) + + assert False +``` + +```python +def process_chunk_challenge_response(state: BeaconState, + response: CustodyResponse, + challenge: CustodyChunkChallengeRecord) -> None: + # Verify chunk index + assert response.chunk_index == challenge.chunk_index + # Verify the chunk matches the crosslink data root + assert verify_merkle_branch( + leaf=hash_tree_root(response.chunk), + branch=response.branch, + depth=challenge.depth, + index=response.chunk_index, + root=challenge.crosslink_data_root, + ) + # Clear the challenge + state.custody_chunk_challenge_records.remove(challenge) + # Reward the proposer + proposer_index = get_beacon_proposer_index(state, state.slot) + increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) +``` + +```python +def process_bit_challenge_response(state: BeaconState, + response: CustodyResponse, + challenge: CustodyBitChallengeRecord) -> None: + # Verify chunk index + assert response.chunk_index < len(challenge.chunk_bits) + # Verify the chunk matches the crosslink data root + assert verify_merkle_branch( + leaf=hash_tree_root(response.chunk), + branch=response.branch, + depth=math.log2(next_power_of_two(len(challenge.chunk_bits))), + index=response.chunk_index, + root=challenge.crosslink_data_root, + ) + # Verify the chunk bit does not match the challenge chunk bit + assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits, response.chunk_index) + # Clear the challenge + state.custody_bit_challenge_records.remove(challenge) + # Slash challenger + slash_validator(state, challenge.challenger_index, challenge.responder_index) +``` + +## Per-epoch processing + +Run `process_challenge_deadlines(state)` immediately after `process_ejections(state)`: + +```python +def process_challenge_deadlines(state: BeaconState) -> None: + for challenge in state.custody_chunk_challenge_records: + if get_current_epoch(state) > challenge.deadline: + slash_validator(state, challenge.responder_index, challenge.challenger_index) + state.custody_chunk_challenge_records.remove(challenge) + + for challenge in state.custody_bit_challenge_records: + if get_current_epoch(state) > challenge.deadline: + slash_validator(state, challenge.responder_index, challenge.challenger_index) + state.custody_bit_challenge_records.remove(challenge) +``` + +In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope): + +```python +def eligible(index): + validator = state.validator_registry[index] + # Cannot exit if there are still open chunk challenges + if len([record for record in state.custody_chunk_challenge_records if record.responder_index == index]) > 0: + return False + # Cannot exit if you have not revealed all of your custody 
keys + elif epoch_to_custody_period(revealer.activation_epoch) + validator.custody_reveal_index <= epoch_to_custody_period(validator.exit_epoch): + return False + # Cannot exit if you already have + elif validator.withdrawable_epoch < FAR_FUTURE_EPOCH: + return False + # Return minimum time + else: + return current_epoch >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWAL_EPOCHS +``` From f5c5c166af0caa7d451e5e74d8c565dca3ea4cee Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 28 Mar 2019 17:56:43 -0500 Subject: [PATCH 143/481] Replace custody challenge game with JABS (#812) See also #818. === * Replace custody challenge game with JABS Replace the existing proof of custody game with a new game ("Justin's Awesome Bit Sum" or JABS) that works as follows: * The data `D` is split up into 512-byte chunks `D[0] .... D[n-1]`, and use a mix function `mix(subkey, data) -> {0,1}` (currently the first bit of the hash of `subkey+data`). We calculate `M[i] = (mix(D[0]) + ... + mix(D[i-1])) % 2`, and set the custody bit to `M[n-1]` * Anyone can challenge by providing the full `M` where `M[n-1]` is not equal to the custody bit * Anyone can respond to a challenge by providing a specific position in `M` along with a branch of the data where `M[i-1] ^ mix(D[i]) != M[i]` The maximum size of data is now `2**6` epochs * `2**6` blocks * `2**14` bytes = `2**26` bytes, so assuming 512-byte mix chunks the maximum mix size is `2**17` bits or `2**14` bytes. The average mix size is `2**8` bytes. --- specs/core/1_shard-data-chains.md | 1085 ++++++++--------------------- 1 file changed, 288 insertions(+), 797 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 92cee4d19..8f2d12a91 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -1,157 +1,143 @@ # Ethereum 2.0 Phase 1 -- Shard Data Chains -**NOTICE**: This document is a work-in-progress for researchers and implementers. It reflects recent spec changes and takes precedence over the [Python proof-of-concept implementation](https://github.com/ethereum/beacon_chain). +**NOTICE**: This document is a work-in-progress for researchers and implementers. -At the current stage, Phase 1, while fundamentally feature-complete, is still subject to change. Development teams with spare resources may consider starting on the "Shard chains and crosslink data" section; at least basic properties, such as the fact that a shard block can get created every slot and is dependent on both a parent block in the same shard and a beacon chain block at or before that same slot, are unlikely to change, though details are likely to undergo similar kinds of changes to what Phase 0 has undergone since the start of the year. 
- -## Table of contents +## Table of Contents -- [Ethereum 2.0 Phase 1 -- Shard Data Chains](#ethereum-20-phase-1----shard-data-chains) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [Terminology](#terminology) - - [Constants](#constants) - - [Misc](#misc) - - [Time parameters](#time-parameters) - - [Max operations per block](#max-operations-per-block) - - [Signature domains](#signature-domains) -- [Shard chains and crosslink data](#shard-chains-and-crosslink-data) - - [Helper functions](#helper-functions) - - [`get_shuffled_committee`](#get_shuffled_committee) - - [`get_persistent_committee`](#get_persistent_committee) - - [`get_shard_proposer_index`](#get_shard_proposer_index) - - [Data Structures](#data-structures) - - [Shard chain blocks](#shard-chain-blocks) - - [Shard block processing](#shard-block-processing) - - [Verifying shard block data](#verifying-shard-block-data) - - [Verifying a crosslink](#verifying-a-crosslink) - - [Shard block fork choice rule](#shard-block-fork-choice-rule) -- [Updates to the beacon chain](#updates-to-the-beacon-chain) +- [Ethereum 2.0 Phase 1 -- Shards Data Chains](#ethereum-20-phase-1----shard-data-chains) + - [Table of Contents](#table-of-contents) + - [Introduction](#introduction) + - [Constants](#constants) + - [Misc](#misc) + - [Time parameters](#time-parameters) + - [Signature domains](#signature-domains) - [Data structures](#data-structures) - - [`Validator`](#validator) - - [`BeaconBlockBody`](#beaconblockbody) - - [`BeaconState`](#beaconstate) - - [`BranchChallenge`](#branchchallenge) - - [`BranchResponse`](#branchresponse) - - [`BranchChallengeRecord`](#branchchallengerecord) - - [`InteractiveCustodyChallengeRecord`](#interactivecustodychallengerecord) - - [`InteractiveCustodyChallengeInitiation`](#interactivecustodychallengeinitiation) - - [`InteractiveCustodyChallengeResponse`](#interactivecustodychallengeresponse) - - [`InteractiveCustodyChallengeContinuation`](#interactivecustodychallengecontinuation) - - [`SubkeyReveal`](#subkeyreveal) - - [Helpers](#helpers) - - [`get_branch_challenge_record_by_id`](#get_branch_challenge_record_by_id) - - [`get_custody_challenge_record_by_id`](#get_custody_challenge_record_by_id) - - [`get_attestation_merkle_depth`](#get_attestation_merkle_depth) - - [`epoch_to_custody_period`](#epoch_to_custody_period) - - [`slot_to_custody_period`](#slot_to_custody_period) - - [`get_current_custody_period`](#get_current_custody_period) - - [`verify_custody_subkey_reveal`](#verify_custody_subkey_reveal) - - [`verify_signed_challenge_message`](#verify_signed_challenge_message) - - [`penalize_validator`](#penalize_validator) - - [Per-slot processing](#per-slot-processing) - - [Operations](#operations) - - [Branch challenges](#branch-challenges) - - [Branch responses](#branch-responses) - - [Subkey reveals](#subkey-reveals) - - [Interactive custody challenge initiations](#interactive-custody-challenge-initiations) - - [Interactive custody challenge responses](#interactive-custody-challenge-responses) - - [Interactive custody challenge continuations](#interactive-custody-challenge-continuations) - - [Per-epoch processing](#per-epoch-processing) - - [One-time phase 1 initiation transition](#one-time-phase-1-initiation-transition) + - [`ShardBlockBody`](#shardblockbody) + - [`ShardBlock`](#shardblock) + - [`ShardBlockHeader`](#shardblockheader) + - [`ShardAttestation`](#shardattestation) + - [Helper functions](#helper-functions) + - [`get_period_committee`](#get_period_committee) + - 
[`get_persistent_committee`](#get_persistent_committee) + - [`get_shard_proposer_index`](#get_shard_proposer_index) + - [`get_shard_header`](#get_shard_header) + - [`verify_shard_attestation_signature`](#verify_shard_attestation_signature) + - [`compute_crosslink_data_root`](#compute_crosslink_data_root) + - [Object validity](#object-validity) + - [Shard blocks](#shard-blocks) + - [Shard attestations](#shard-attestations) + - [Beacon attestations](#beacon-attestations) + - [Shard fork choice rule](#shard-fork-choice-rule) -### Introduction +## Introduction -This document represents the specification for Phase 1 of Ethereum 2.0 -- Shard Data Chains. Phase 1 depends on the implementation of [Phase 0 -- The Beacon Chain](0_beacon-chain.md). +This document describes the shard data layer and the shard fork choice rule in Phase 1 of Ethereum 2.0. -Ethereum 2.0 consists of a central beacon chain along with `SHARD_COUNT` shard chains. Phase 1 is primarily concerned with the construction, validity, and consensus on the _data_ of these shard chains. Phase 1 does not specify shard chain state execution or account balances. This is left for future phases. +## Constants -### Terminology +### Misc -### Constants +| Name | Value | +| - | - | +| `BYTES_PER_SHARD_BLOCK_BODY` | `2**14` (= 16,384) | +| `MAX_SHARD_ATTESTIONS` | `2**4` (= 16) | +| `PHASE_1_GENESIS_EPOCH` | **TBD** | +| `PHASE_1_GENESIS_SLOT` | get_epoch_start_slot(PHASE_1_GENESIS_EPOCH) | -Phase 1 depends upon all of the constants defined in [Phase 0](0_beacon-chain.md#constants) in addition to the following: - -#### Misc - -| Name | Value | Unit | -|-------------------------------|------------------|--------| -| `SHARD_CHUNK_SIZE` | 2**5 (= 32) | bytes | -| `SHARD_BLOCK_SIZE` | 2**14 (= 16,384) | bytes | -| `MINOR_REWARD_QUOTIENT` | 2**8 (= 256) | | -| `MAX_POC_RESPONSE_DEPTH` | 5 | | -| `ZERO_PUBKEY` | int_to_bytes48(0)| | -| `VALIDATOR_NULL` | 2**64 - 1 | | - -#### Time parameters +### Time parameters | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `CROSSLINK_LOOKBACK` | 2**5 (= 32) | slots | 3.2 minutes | -| `MAX_BRANCH_CHALLENGE_DELAY` | 2**11 (= 2,048) | epochs | 9 days | -| `CUSTODY_PERIOD_LENGTH` | 2**11 (= 2,048) | epochs | 9 days | -| `PERSISTENT_COMMITTEE_PERIOD` | 2**11 (= 2,048) | epochs | 9 days | -| `CHALLENGE_RESPONSE_DEADLINE` | 2**14 (= 16,384) | epochs | 73 days | +| `CROSSLINK_LOOKBACK` | 2**0 (= 1) | epochs | 6.2 minutes | +| `PERSISTENT_COMMITTEE_PERIOD` | 2**11 (= 2,048) | epochs | ~9 days | -#### Max operations per block +### Signature domains -| Name | Value | -|----------------------------------------------------|---------------| -| `MAX_BRANCH_CHALLENGES` | 2**2 (= 4) | -| `MAX_BRANCH_RESPONSES` | 2**4 (= 16) | -| `MAX_EARLY_SUBKEY_REVEALS` | 2**4 (= 16) | -| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS` | 2 | -| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES` | 16 | -| `MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUTATIONS` | 16 | +| Name | Value | +| - | - | +| `DOMAIN_SHARD_PROPOSER` | `128` | +| `DOMAIN_SHARD_ATTESTER` | `129` | -#### Signature domains +## Data structures -| Name | Value | -|------------------------------|-----------------| -| `DOMAIN_SHARD_PROPOSER` | 129 | -| `DOMAIN_SHARD_ATTESTER` | 130 | -| `DOMAIN_CUSTODY_SUBKEY` | 131 | -| `DOMAIN_CUSTODY_INTERACTIVE` | 132 | +### `ShardBlockBody` -# Shard chains and crosslink data +```python +['byte', BYTES_PER_SHARD_BLOCK_BODY] +``` + +### `ShardBlock` + +```python +{ + 'slot': Slot, + 'shard': Shard, + 'beacon_chain_root': Hash, + 
'previous_block_root': Hash, + 'data': ShardBlockBody, + 'state_root': Hash, + 'attestations': [ShardAttestation], + 'signature': BLSSignature, +} +``` + +### `ShardBlockHeader` + +```python +{ + 'slot': Slot, + 'shard': Shard, + 'beacon_chain_root': Hash, + 'previous_block_root': Hash, + 'body_root': Hash, + 'state_root': Hash, + 'attestations': [ShardAttestation], + 'signature': BLSSignature, +} +``` + +### `ShardAttestation` + +```python +{ + 'data': { + 'slot': Slot, + 'shard': Shard, + 'shard_block_root': Hash, + }, + 'aggregation_bitfield': Bitfield, + 'aggregate_signature': BLSSignature, +} +``` ## Helper functions -#### `get_shuffled_committee` +### `get_period_committee` ```python -def get_shuffled_committee(state: BeaconState, - shard: Shard, - committee_start_epoch: Epoch, - index: int, - committee_count: int) -> List[ValidatorIndex]: +def get_period_committee(state: BeaconState, + shard: Shard, + committee_start_epoch: Epoch, + index: int, + committee_count: int) -> List[ValidatorIndex]: """ - Return shuffled committee. + Return committee for a period. Used to construct persistent committees. """ active_validator_indices = get_active_validator_indices(state.validator_registry, committee_start_epoch) - length = len(active_validator_indices) seed = generate_seed(state, committee_start_epoch) - start_offset = get_split_offset( - length, - SHARD_COUNT * committee_count, - shard * committee_count + index, + return compute_committee( + validator_indices=active_validator_indices, + seed=seed, + index=shard * committee_count + index, + total_committees=SHARD_COUNT * committee_count, ) - end_offset = get_split_offset( - length, - SHARD_COUNT * committee_count, - shard * committee_count + index + 1, - ) - return [ - active_validator_indices[get_permuted_index(i, length, seed)] - for i in range(start_offset, end_offset) - ] ``` -#### `get_persistent_committee` +### `get_persistent_committee` ```python def get_persistent_committee(state: BeaconState, @@ -160,7 +146,6 @@ def get_persistent_committee(state: BeaconState, """ Return the persistent committee for the given ``shard`` at the given ``slot``. 
""" - earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD @@ -172,14 +157,11 @@ def get_persistent_committee(state: BeaconState, ) + 1 index = slot % committee_count - earlier_committee = get_shuffled_committee(state, shard, earlier_start_epoch, index, committee_count) - later_committee = get_shuffled_committee(state, shard, later_start_epoch, index, committee_count) + earlier_committee = get_period_committee(state, shard, earlier_start_epoch, index, committee_count) + later_committee = get_period_committee(state, shard, later_start_epoch, index, committee_count) def get_switchover_epoch(index): - return ( - bytes_to_int(hash(earlier_seed + bytes3(index))[0:8]) % - PERSISTENT_COMMITTEE_PERIOD - ) + return bytes_to_int(hash(earlier_seed + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated @@ -189,723 +171,232 @@ def get_persistent_committee(state: BeaconState, ))) ``` -#### `get_shard_proposer_index` +### `get_shard_proposer_index` ```python def get_shard_proposer_index(state: BeaconState, shard: Shard, slot: Slot) -> ValidatorIndex: - seed = hash( - state.current_shuffling_seed + - int_to_bytes8(shard) + - int_to_bytes8(slot) - ) + # Randomly shift persistent committee persistent_committee = get_persistent_committee(state, shard, slot) - # Default proposer - index = bytes_to_int(seed[0:8]) % len(persistent_committee) - # If default proposer exits, try the other proposers in order; if all are exited - # return None (ie. no block can be proposed) - validators_to_try = persistent_committee[index:] + persistent_committee[:index] - for index in validators_to_try: + seed = hash(state.current_shuffling_seed + int_to_bytes8(shard) + int_to_bytes8(slot)) + random_index = bytes_to_int(seed[0:8]) % len(persistent_committee) + persistent_committee = persistent_committee[random_index:] + persistent_committee[:random_index] + + # Search for an active proposer + for index in persistent_committee: if is_active_validator(state.validator_registry[index], get_current_epoch(state)): return index + + # No block can be proposed if no validator is active return None ``` -## Data Structures - -### Shard chain blocks - -A `ShardBlock` object has the following fields: +### `get_shard_header` ```python -{ - # Slot number - 'slot': 'uint64', - # What shard is it on - 'shard_id': 'uint64', - # Parent block's root - 'parent_root': 'bytes32', - # Beacon chain block - 'beacon_chain_ref': 'bytes32', - # Merkle root of data - 'data_root': 'bytes32' - # State root (placeholder for now) - 'state_root': 'bytes32', - # Block signature - 'signature': 'bytes96', - # Attestation - 'participation_bitfield': 'bytes', - 'aggregate_signature': 'bytes96', -} +def get_shard_header(block: ShardBlock) -> ShardBlockHeader: + return ShardBlockHeader( + slot: block.slot, + shard: block.shard, + beacon_chain_root: block.beacon_chain_root, + previous_block_root: block.previous_block_root, + body_root: hash_tree_root(block.body), + state_root: block.state_root, + attestations: block.attestations, + signature: block.signature, + ) ``` -## Shard block processing - -For a `shard_block` on a shard to be processed by a node, the following conditions must be met: - -* The `ShardBlock` pointed to by `shard_block.parent_root` has 
already been processed and accepted -* The signature for the block from the _proposer_ (see below for definition) of that block is included along with the block in the network message object - -To validate a block header on shard `shard_block.shard_id`, compute as follows: - -* Verify that `shard_block.beacon_chain_ref` is the hash of a block in the (canonical) beacon chain with slot less than or equal to `slot`. -* Verify that `shard_block.beacon_chain_ref` is equal to or a descendant of the `shard_block.beacon_chain_ref` specified in the `ShardBlock` pointed to by `shard_block.parent_root`. -* Let `state` be the state of the beacon chain block referred to by `shard_block.beacon_chain_ref`. -* Let `persistent_committee = get_persistent_committee(state, shard_block.shard_id, shard_block.slot)`. -* Assert `verify_bitfield(shard_block.participation_bitfield, len(persistent_committee))` -* For every `i in range(len(persistent_committee))` where `is_active_validator(state.validator_registry[persistent_committee[i]], get_current_epoch(state))` returns `False`, verify that `get_bitfield_bit(shard_block.participation_bitfield, i) == 0` -* Let `proposer_index = get_shard_proposer_index(state, shard_block.shard_id, shard_block.slot)`. -* Verify that `proposer_index` is not `None`. -* Let `msg` be the `shard_block` but with `shard_block.signature` set to `[0, 0]`. -* Verify that `bls_verify(pubkey=validators[proposer_index].pubkey, message_hash=hash(msg), signature=shard_block.signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_PROPOSER))` passes. -* Let `group_public_key = bls_aggregate_pubkeys([state.validator_registry[index].pubkey for i, index in enumerate(persistent_committee) if get_bitfield_bit(shard_block.participation_bitfield, i) is True])`. -* Verify that `bls_verify(pubkey=group_public_key, message_hash=shard_block.parent_root, sig=shard_block.aggregate_signature, domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER))` passes. - -### Verifying shard block data - -At network layer, we expect a shard block header to be broadcast along with its `block_body`. - -* Verify that `len(block_body) == SHARD_BLOCK_SIZE` -* Verify that `merkle_root(block_body)` equals the `data_root` in the header. - -### Verifying a crosslink - -A node should sign a crosslink only if the following conditions hold. **If a node has the capability to perform the required level of verification, it should NOT follow chains on which a crosslink for which these conditions do NOT hold has been included, or a sufficient number of signatures have been included that during the next state recalculation, a crosslink will be registered.** - -First, the conditions must recursively apply to the crosslink referenced in `last_crosslink_root` for the same shard (unless `last_crosslink_root` equals zero, in which case we are at the genesis). - -Second, we verify the `shard_chain_commitment`. -* Let `start_slot = state.latest_crosslinks[shard].epoch * SLOTS_PER_EPOCH + SLOTS_PER_EPOCH - CROSSLINK_LOOKBACK`. -* Let `end_slot = attestation.data.slot - attestation.data.slot % SLOTS_PER_EPOCH - CROSSLINK_LOOKBACK`. -* Let `length = end_slot - start_slot`, `headers[0] .... headers[length-1]` be the serialized block headers in the canonical shard chain from the verifer's point of view (note that this implies that `headers` and `bodies` have been checked for validity). -* Let `bodies[0] ... bodies[length-1]` be the bodies of the blocks. 
-* Note: If there is a missing slot, then the header and body are the same as that of the block at the most recent slot that has a block.
-
-We define two helpers:
+### `verify_shard_attestation_signature`
 
 ```python
-def pad_to_power_of_2(values: List[bytes]) -> List[bytes]:
-    zero_shard_block = b'\x00' * SHARD_BLOCK_SIZE
-    while not is_power_of_two(len(values)):
-        values = values + [zero_shard_block]
-    return values
+def verify_shard_attestation_signature(state: BeaconState,
+                                       attestation: ShardAttestation) -> None:
+    data = attestation.data
+    persistent_committee = get_persistent_committee(state, data.shard, data.slot)
+    assert verify_bitfield(attestation.aggregation_bitfield, len(persistent_committee))
+    pubkeys = []
+    for i, index in enumerate(persistent_committee):
+        if get_bitfield_bit(attestation.aggregation_bitfield, i) == 0b1:
+            validator = state.validator_registry[index]
+            assert is_active_validator(validator, get_current_epoch(state))
+            pubkeys.append(validator.pubkey)
+    assert bls_verify(
+        pubkey=bls_aggregate_pubkeys(pubkeys),
+        message_hash=data.shard_block_root,
+        signature=attestation.aggregate_signature,
+        domain=get_domain(state, slot_to_epoch(data.slot), DOMAIN_SHARD_ATTESTER)
+    )
 ```
 
-```python
-def merkle_root_of_bytes(data: bytes) -> bytes:
-    return merkle_root([data[i:i + 32] for i in range(0, len(data), 32)])
-```
-
-We define the function for computing the commitment as follows:
+### `compute_crosslink_data_root`
 
 ```python
-def compute_commitment(headers: List[ShardBlock], bodies: List[bytes]) -> Bytes32:
+def compute_crosslink_data_root(blocks: List[ShardBlock]) -> Hash:
+    def is_power_of_two(value: int) -> bool:
+        return (value > 0) and (value & (value - 1) == 0)
+
+    def pad_to_power_of_2(values: List[bytes]) -> List[bytes]:
+        while not is_power_of_two(len(values)):
+            values += [b'\x00' * BYTES_PER_SHARD_BLOCK_BODY]
+        return values
+
+    def merkle_root_of_bytes(data: bytes) -> bytes:
+        return merkle_root([data[i:i + 32] for i in range(0, len(data), 32)])
+
     return hash(
-        merkle_root(
-            pad_to_power_of_2([
-                merkle_root_of_bytes(zpad(serialize(h), SHARD_BLOCK_SIZE)) for h in headers
-            ])
-        ) +
-        merkle_root(
-            pad_to_power_of_2([
-                merkle_root_of_bytes(h) for h in bodies
-            ])
-        )
+        merkle_root(pad_to_power_of_2([
+            merkle_root_of_bytes(zpad(serialize(get_shard_header(block)), BYTES_PER_SHARD_BLOCK_BODY)) for block in blocks
+        ])) +
+        merkle_root(pad_to_power_of_2([
+            merkle_root_of_bytes(block.body) for block in blocks
+        ]))
     )
 ```
 
-The `shard_chain_commitment` is only valid if it equals `compute_commitment(headers, bodies)`.
+## Object validity
 
+### Shard blocks
 
-### Shard block fork choice rule
+Let:
 
-The fork choice rule for any shard is LMD GHOST using the shard chain attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (ie. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_ref` is the block in the main beacon chain at the specified `slot` should be considered (if the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than a slot).
-
-# Updates to the beacon chain
-
-## Data structures
-
-### `Validator`
-
-Add member values to the end of the `Validator` object:
+* `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot`
+* `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]`
+* `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined
+* `unix_time` be the current unix time
+* `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block`
 
 ```python
-    'next_subkey_to_reveal': 'uint64',
-    'reveal_max_periods_late': 'uint64',
-```
+def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
+                         beacon_state: BeaconState,
+                         valid_shard_blocks: List[ShardBlock],
+                         unix_time: uint64,
+                         candidate: ShardBlock) -> bool:
+    # Check if block is already determined valid
+    for _, block in enumerate(valid_shard_blocks):
+        if candidate == block:
+            return True
 
-And the initializers:
+    # Check slot number
+    assert block.slot >= PHASE_1_GENESIS_SLOT
+    assert unix_time >= beacon_state.genesis_time + (block.slot - GENESIS_SLOT) * SECONDS_PER_SLOT
 
-```python
-    'next_subkey_to_reveal': get_current_custody_period(state),
-    'reveal_max_periods_late': 0,
-```
+    # Check shard number
+    assert block.shard <= SHARD_COUNT
 
-### `BeaconBlockBody`
+    # Check beacon block
+    beacon_block = beacon_blocks[block.slot]
+    assert block.beacon_chain_root == signed_root(beacon_block)
+    assert beacon_block.slot <= block.slot
 
-Add member values to the `BeaconBlockBody` structure:
+    # Check state root
+    assert block.state_root == ZERO_HASH # [to be removed in phase 2]
 
 ```python
-    'branch_challenges': [BranchChallenge],
-    'branch_responses': [BranchResponse],
-    'subkey_reveals': [SubkeyReveal],
-    'interactive_custody_challenge_initiations': [InteractiveCustodyChallengeInitiation],
-    'interactive_custody_challenge_responses': [InteractiveCustodyChallengeResponse],
-    'interactive_custody_challenge_continuations': [InteractiveCustodyChallengeContinuation],
-
-```
-
-And initialize to the following:
-
-```python
-    'branch_challenges': [],
-    'branch_responses': [],
-    'subkey_reveals': [],
-```
-
-### `BeaconState`
-
-Add member values to the `BeaconState` structure:
-
-```python
-    'branch_challenge_records': [BranchChallengeRecord],
-    'next_branch_challenge_id': 'uint64',
-    'custody_challenge_records': [InteractiveCustodyChallengeRecord],
-    'next_custody_challenge_id': 'uint64',
-```
-
-### `BranchChallenge`
-
-Define a `BranchChallenge` as follows:
-
-```python
-{
-    'responder_index': 'uint64',
-    'data_index': 'uint64',
-    'attestation': SlashableAttestation,
-}
-```
-
-### `BranchResponse`
-
-Define a `BranchResponse` as follows:
-
-```python
-{
-    'challenge_id': 'uint64',
-    'responding_to_custody_challenge': 'bool',
-    'data': 'bytes32',
-    'branch': ['bytes32'],
-}
-```
-
-### `BranchChallengeRecord`
-
-Define a `BranchChallengeRecord` as follows:
-
-```python
-{
-    'challenge_id': 'uint64',
-    'challenger_index': 'uint64',
-    'responder_index': 'uint64',
-    'root': 'bytes32',
-    'depth': 'uint64',
-    'deadline': 'uint64',
-    'data_index': 'uint64',
-}
-```
-
-### `InteractiveCustodyChallengeRecord`
-
-```python
-{
-    'challenge_id': 'uint64',
-    'challenger_index': 'uint64',
-    'responder_index': 'uint64',
-    # Initial data root
-    'data_root': 'bytes32',
-    # Initial custody bit
-    'custody_bit': 'bool',
-    # Responder subkey
-    'responder_subkey': 'bytes96',
-    # The hash in the PoC tree in the position that we are currently
at - 'current_custody_tree_node': 'bytes32', - # The position in the tree, in terms of depth and position offset - 'depth': 'uint64', - 'offset': 'uint64', - # Max depth of the branch - 'max_depth': 'uint64', - # Deadline to respond (as an epoch) - 'deadline': 'uint64', -} -``` - -### `InteractiveCustodyChallengeInitiation` - -```python -{ - 'attestation': SlashableAttestation, - 'responder_index': 'uint64', - 'challenger_index': 'uint64', - 'responder_subkey': 'bytes96', - 'signature': 'bytes96', -} -``` - -### `InteractiveCustodyChallengeResponse` - -```python -{ - 'challenge_id': 'uint64', - 'hashes': ['bytes32'], - 'signature': 'bytes96', -} -``` - -### `InteractiveCustodyChallengeContinuation` - -```python -{ - 'challenge_id': 'uint64', - 'sub_index': 'uint64', - 'new_custody_tree_node': 'bytes32', - 'proof': ['bytes32'], - 'signature': 'bytes96', -} -``` - -### `SubkeyReveal` - -Define a `SubkeyReveal` as follows: - -```python -{ - 'validator_index': 'uint64', - 'period': 'uint64', - 'subkey': 'bytes96', - 'mask': 'bytes32', - 'revealer_index': 'uint64' -} -``` - -## Helpers - -### `get_branch_challenge_record_by_id` - -```python -def get_branch_challenge_record_by_id(state: BeaconState, id: int) -> BranchChallengeRecord: - return [c for c in state.branch_challenges if c.challenge_id == id][0] -``` - -### `get_custody_challenge_record_by_id` - -```python -def get_custody_challenge_record_by_id(state: BeaconState, id: int) -> BranchChallengeRecord: - return [c for c in state.branch_challenges if c.challenge_id == id][0] -``` - -### `get_attestation_merkle_depth` - -```python -def get_attestation_merkle_depth(attestation: Attestation) -> int: - start_epoch = attestation.data.latest_crosslink.epoch - end_epoch = slot_to_epoch(attestation.data.slot) - chunks_per_slot = SHARD_BLOCK_SIZE // 32 - chunks = (end_epoch - start_epoch) * EPOCH_LENGTH * chunks_per_slot - return log2(next_power_of_two(chunks)) -``` - -### `epoch_to_custody_period` - -```python -def epoch_to_custody_period(epoch: Epoch) -> int: - return epoch // CUSTODY_PERIOD_LENGTH -``` - -### `slot_to_custody_period` - -```python -def slot_to_custody_period(slot: Slot) -> int: - return epoch_to_custody_period(slot_to_epoch(slot)) -``` - -### `get_current_custody_period` - -```python -def get_current_custody_period(state: BeaconState) -> int: - return epoch_to_custody_period(get_current_epoch(state)) -``` - -### `verify_custody_subkey_reveal` - -```python -def verify_custody_subkey_reveal(pubkey: bytes48, - subkey: bytes96, - mask: bytes32, - mask_pubkey: bytes48, - period: int) -> bool: - # Legitimate reveal: checking that the provided value actually is the subkey - if mask == ZERO_HASH: - pubkeys=[pubkey] - message_hashes=[hash(int_to_bytes8(period))] - - # Punitive early reveal: checking that the provided value is a valid masked subkey - # (masking done to prevent "stealing the reward" from a whistleblower by block proposers) - # Secure under the aggregate extraction infeasibility assumption described on page 11-12 - # of https://crypto.stanford.edu/~dabo/pubs/papers/aggreg.pdf + # Check parent block + if block.slot == PHASE_1_GENESIS_SLOT: + assert candidate.previous_block_root == ZERO_HASH else: - pubkeys=[pubkey, mask_pubkey] - message_hashes=[hash(int_to_bytes8(period)), mask] - - return bls_multi_verify( - pubkeys=pubkeys, - message_hashes=message_hashes, - signature=subkey, - domain=get_domain( - fork=state.fork, - epoch=period * CUSTODY_PERIOD_LENGTH, - domain_type=DOMAIN_CUSTODY_SUBKEY, - ) - ) -``` + parent_block = 
next(
+            (block for block in valid_shard_blocks if
+             signed_root(block) == candidate.previous_block_root),
+            None)
+        assert parent_block is not None
+        assert parent_block.shard == block.shard
+        assert parent_block.slot < block.slot
+        assert signed_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root
 
-### `verify_signed_challenge_message`
 
+    # Check attestations
+    assert len(block.attestations) <= MAX_SHARD_ATTESTATIONS
+    for _, attestation in enumerate(block.attestations):
+        assert max(GENESIS_SHARD_SLOT, block.slot - SLOTS_PER_EPOCH) <= attestation.data.slot
+        assert attestation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY
+        assert attestation.data.shard == block.shard
+        verify_shard_attestation_signature(beacon_state, attestation)
 
-```python
-def verify_signed_challenge_message(message: Any, pubkey: bytes48) -> bool:
-    return bls_verify(
-        message_hash=signed_root(message),
-        pubkey=pubkey,
-        signature=message.signature,
-        domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_INTERACTIVE)
+    # Check signature
+    proposer_index = get_shard_proposer_index(beacon_state, block.shard, block.slot)
+    assert proposer_index is not None
+    assert bls_verify(
+        pubkey=beacon_state.validator_registry[proposer_index].pubkey,
+        message_hash=signed_root(block),
+        signature=block.signature,
+        domain=get_domain(beacon_state, slot_to_epoch(block.slot), DOMAIN_SHARD_PROPOSER)
     )
+    return True
 ```
 
-### `penalize_validator`
+### Shard attestations
 
-Change the definition of `penalize_validator` as follows:
+Let:
+
+* `valid_shard_blocks` be the list of valid `ShardBlock`
+* `beacon_state` be the canonical `BeaconState`
+* `candidate` be a candidate `ShardAttestation` for which validity is to be determined by running `is_valid_shard_attestation`
 
 ```python
-def penalize_validator(state: BeaconState, index: ValidatorIndex, whistleblower_index=None:ValidatorIndex) -> None:
-    """
-    Penalize the validator of the given ``index``.
-    Note that this function mutates ``state``.
-    """
-    exit_validator(state, index)
-    validator = state.validator_registry[index]
-    state.latest_penalized_balances[get_current_epoch(state) % LATEST_PENALIZED_EXIT_LENGTH] += get_effective_balance(state, index)
-
-    block_proposer_index = get_beacon_proposer_index(state, state.slot)
-    whistleblower_reward = get_effective_balance(state, index) // WHISTLEBLOWER_REWARD_QUOTIENT
-    if whistleblower_index is None:
-        state.validator_balances[block_proposer_index] += whistleblower_reward
+def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock],
+                               beacon_state: BeaconState,
+                               candidate: ShardAttestation) -> bool:
+    # Check shard block
+    shard_block = next(
+        (block for block in valid_shard_blocks if
+         signed_root(block) == candidate.data.shard_block_root),
+        None)
+    assert shard_block is not None
+    assert shard_block.slot == candidate.data.slot
+    assert shard_block.shard == candidate.data.shard
+
+    # Check signature
+    verify_shard_attestation_signature(beacon_state, candidate)
+
+    return True
+```
+
+### Beacon attestations
+
+Let:
+
+* `shard` be a valid `Shard`
+* `shard_blocks` be the `ShardBlock` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `shard` at slot `slot`
+* `beacon_state` be the canonical `BeaconState`
+* `valid_attestations` be the list of valid `Attestation`, recursively defined
+* `candidate` be a candidate `Attestation` which is valid under phase 0 rules, and for which validity is to be determined under phase 1 rules by running `is_valid_beacon_attestation`
+
+```python
+def is_valid_beacon_attestation(shard: Shard,
+                                shard_blocks: List[ShardBlock],
+                                beacon_state: BeaconState,
+                                valid_attestations: List[Attestation],
+                                candidate: Attestation) -> bool:
+    # Check if attestation is already determined valid
+    for _, attestation in enumerate(valid_attestations):
+        if candidate == attestation:
+            return True
+
+    # Check previous attestation
+    if candidate.data.previous_crosslink.epoch <= PHASE_1_GENESIS_EPOCH:
+        assert candidate.data.previous_crosslink.crosslink_data_root == ZERO_HASH
     else:
-        state.validator_balances[whistleblower_index] += (
-            whistleblower_reward * INCLUDER_REWARD_QUOTIENT / (INCLUDER_REWARD_QUOTIENT + 1)
-        )
-        state.validator_balances[block_proposer_index] += whistleblower_reward / (INCLUDER_REWARD_QUOTIENT + 1)
-    state.validator_balances[index] -= whistleblower_reward
-    validator.penalized_epoch = get_current_epoch(state)
-    validator.withdrawable_epoch = get_current_epoch(state) + LATEST_PENALIZED_EXIT_LENGTH
+        previous_attestation = next(
+            (attestation for attestation in valid_attestations if
+             attestation.data.crosslink_data_root == candidate.data.previous_crosslink.crosslink_data_root),
+            None)
+        assert previous_attestation is not None
+        assert candidate.data.previous_attestation.epoch < slot_to_epoch(candidate.data.slot)
+
+    # Check crosslink data root
+    start_epoch = beacon_state.latest_crosslinks[shard].epoch
+    end_epoch = min(slot_to_epoch(candidate.data.slot) - CROSSLINK_LOOKBACK, start_epoch + MAX_CROSSLINK_EPOCHS)
+    blocks = []
+    for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH):
+        blocks.append(shard_blocks[slot])
+    assert candidate.data.crosslink_data_root == compute_crosslink_data_root(blocks)
+
+    return True
+```
 
-The only change is that this introduces the possibility of a penalization where the "whistleblower" that takes credit is NOT the block proposer.
+## Shard fork choice rule -## Per-slot processing - -### Operations - -Add the following operations to the per-slot processing, in order the given below and _after_ all other operations (specifically, right after exits). - -#### Branch challenges - -Verify that `len(block.body.branch_challenges) <= MAX_BRANCH_CHALLENGES`. - -For each `challenge` in `block.body.branch_challenges`, run: - -```python -def process_branch_challenge(state: BeaconState, - challenge: BranchChallenge) -> None: - # Check that it's not too late to challenge - assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY - assert state.validator_registry[responder_index].exit_epoch >= get_current_epoch(state) - MAX_BRANCH_CHALLENGE_DELAY - # Check the attestation is valid - assert verify_slashable_attestation(state, challenge.attestation) - # Check that the responder participated - assert challenger.responder_index in challenge.attestation.validator_indices - # Check the challenge is not a duplicate - assert [ - c for c in state.branch_challenge_records if c.root == challenge.attestation.data.crosslink_data_root and - c.data_index == challenge.data_index - ] == [] - # Check validity of depth - depth = get_attestation_merkle_depth(challenge.attestation) - assert c.data_index < 2**depth - # Add new challenge - state.branch_challenge_records.append(BranchChallengeRecord( - challenge_id=state.next_branch_challenge_id, - challenger_index=get_beacon_proposer_index(state, state.slot), - root=challenge.attestation.data.shard_chain_commitment, - depth=depth, - deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE, - data_index=challenge.data_index - )) - state.next_branch_challenge_id += 1 -``` - -#### Branch responses - -Verify that `len(block.body.branch_responses) <= MAX_BRANCH_RESPONSES`. 
- -For each `response` in `block.body.branch_responses`, if `response.responding_to_custody_challenge == False`, run: - -```python -def process_branch_exploration_response(state: BeaconState, - response: BranchResponse) -> None: - challenge = get_branch_challenge_record_by_id(response.challenge_id) - assert verify_merkle_branch( - leaf=response.data, - branch=response.branch, - depth=challenge.depth, - index=challenge.data_index, - root=challenge.root - ) - # Must wait at least ENTRY_EXIT_DELAY before responding to a branch challenge - assert get_current_epoch(state) >= challenge.inclusion_epoch + ENTRY_EXIT_DELAY - state.branch_challenge_records.pop(challenge) - # Reward the proposer - proposer_index = get_beacon_proposer_index(state, state.slot) - state.validator_balances[proposer_index] += base_reward(state, index) // MINOR_REWARD_QUOTIENT -``` - -If `response.responding_to_custody_challenge == True`, run: - -```python -def process_branch_custody_response(state: BeaconState, - response: BranchResponse) -> None: - challenge = get_custody_challenge_record_by_id(response.challenge_id) - responder = state.validator_registry[challenge.responder_index] - # Verify we're not too late - assert get_current_epoch(state) < responder.withdrawable_epoch - # Verify the Merkle branch *of the data tree* - assert verify_merkle_branch( - leaf=response.data, - branch=response.branch, - depth=challenge.max_depth, - index=challenge.offset, - root=challenge.data_root - ) - # Responder wins - if hash(challenge.responder_subkey + response.data) == challenge.current_custody_tree_node: - penalize_validator(state, challenge.challenger_index, challenge.responder_index) - # Challenger wins - else: - penalize_validator(state, challenge.responder_index, challenge.challenger_index) - state.custody_challenge_records.pop(challenge) -``` - -#### Subkey reveals - -Verify that `len(block.body.early_subkey_reveals) <= MAX_EARLY_SUBKEY_REVEALS`. - -For each `reveal` in `block.body.early_subkey_reveals`: - -* Verify that `verify_custody_subkey_reveal(state.validator_registry[reveal.validator_index].pubkey, reveal.subkey, reveal.period, reveal.mask, state.validator_registry[reveal.revealer_index].pubkey)` returns `True`. -* Let `is_early_reveal = reveal.period > get_current_custody_period(state) or (reveal.period == get_current_custody_period(state) and state.validator_registry[reveal.validator_index].exit_epoch > get_current_epoch(state))` (ie. either the reveal is of a future period, or it's of the current period and the validator is still active) -* Verify that one of the following is true: - * (i) `is_early_reveal` is `True` - * (ii) `is_early_reveal` is `False` and `reveal.period == state.validator_registry[reveal.validator_index].next_subkey_to_reveal` (revealing a past subkey, or a current subkey for a validator that has exited) and `reveal.mask == ZERO_HASH` - -In case (i): - -* Verify that `state.validator_registry[reveal.validator_index].penalized_epoch > get_current_epoch(state). -* Run `penalize_validator(state, reveal.validator_index, reveal.revealer_index)`. -* Set `state.validator_balances[reveal.revealer_index] += base_reward(state, index) // MINOR_REWARD_QUOTIENT` - -In case (ii): - -* Determine the proposer `proposer_index = get_beacon_proposer_index(state, state.slot)` and set `state.validator_balances[proposer_index] += base_reward(state, index) // MINOR_REWARD_QUOTIENT`. 
-* Set `state.validator_registry[reveal.validator_index].next_subkey_to_reveal += 1` -* Set `state.validator_registry[reveal.validator_index].reveal_max_periods_late = max(state.validator_registry[reveal.validator_index].reveal_max_periods_late, get_current_period(state) - reveal.period)`. - -#### Interactive custody challenge initiations - -Verify that `len(block.body.interactive_custody_challenge_initiations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_INITIATIONS`. - -For each `initiation` in `block.body.interactive_custody_challenge_initiations`, use the following function to process it: - -```python -def process_initiation(state: BeaconState, - initiation: InteractiveCustodyChallengeInitiation) -> None: - challenger = state.validator_registry[initiation.challenger_index] - responder = state.validator_registry[initiation.responder_index] - # Verify the signature - assert verify_signed_challenge_message(initiation, challenger.pubkey) - # Verify the attestation - assert verify_slashable_attestation(initiation.attestation, state) - # Check that the responder actually participated in the attestation - assert initiation.responder_index in attestation.validator_indices - # Any validator can be a challenger or responder of max 1 challenge at a time - for c in state.custody_challenge_records: - assert c.challenger_index != initiation.challenger_index - assert c.responder_index != initiation.responder_index - # Can't challenge if you've been penalized - assert challenger.penalized_epoch == FAR_FUTURE_EPOCH - # Make sure the revealed subkey is valid - assert verify_custody_subkey_reveal( - pubkey=state.validator_registry[responder_index].pubkey, - subkey=initiation.responder_subkey, - period=slot_to_custody_period(attestation.data.slot) - ) - # Verify that the attestation is still eligible for challenging - min_challengeable_epoch = responder.exit_epoch - CUSTODY_PERIOD_LENGTH * (1 + responder.reveal_max_periods_late) - assert min_challengeable_epoch <= slot_to_epoch(initiation.attestation.data.slot) - # Create a new challenge object - state.branch_challenge_records.append(InteractiveCustodyChallengeRecord( - challenge_id=state.next_branch_challenge_id, - challenger_index=initiation.challenger_index, - responder_index=initiation.responder_index, - data_root=attestation.custody_commitment, - custody_bit=get_bitfield_bit(attestation.custody_bitfield, attestation.validator_indices.index(responder_index)), - responder_subkey=responder_subkey, - current_custody_tree_node=ZERO_HASH, - depth=0, - offset=0, - max_depth=get_attestation_data_merkle_depth(initiation.attestation.data), - deadline=get_current_epoch(state) + CHALLENGE_RESPONSE_DEADLINE - )) - state.next_branch_challenge_id += 1 - # Responder can't withdraw yet! - state.validator_registry[responder_index].withdrawable_epoch = FAR_FUTURE_EPOCH -``` - -#### Interactive custody challenge responses - -A response provides 32 hashes that are under current known proof of custody tree node. Note that at the beginning the tree node is just one bit of the custody root, so we ask the responder to sign to commit to the top 5 levels of the tree and therefore the root hash; at all other stages in the game responses are self-verifying. - -Verify that `len(block.body.interactive_custody_challenge_responses) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_RESPONSES`. 
- -For each `response` in `block.body.interactive_custody_challenge_responses`, use the following function to process it: - -```python -def process_response(state: BeaconState, - response: InteractiveCustodyChallengeResponse) -> None: - challenge = get_custody_challenge_record_by_id(state, response.challenge_id) - responder = state.validator_registry[challenge.responder_index] - # Check that the right number of hashes was provided - expected_depth = min(challenge.max_depth - challenge.depth, MAX_POC_RESPONSE_DEPTH) - assert 2**expected_depth == len(response.hashes) - # Must make some progress! - assert expected_depth > 0 - # Check the hashes match the previously provided root - root = merkle_root(response.hashes) - # If this is the first response check the bit and the signature and set the root - if challenge.depth == 0: - assert get_bitfield_bit(root, 0) == challenge.custody_bit - assert verify_signed_challenge_message(response, responder.pubkey) - challenge.current_custody_tree_node = root - # Otherwise just check the response against the root - else: - assert root == challenge_data.current_custody_tree_node - # Update challenge data - challenge.deadline=FAR_FUTURE_EPOCH - responder.withdrawable_epoch = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH -``` - -#### Interactive custody challenge continuations - -Once a response provides 32 hashes, the challenger has the right to choose any one of them that they feel is constructed incorrectly to continue the game. Note that eventually, the game will get to the point where the `new_custody_tree_node` is a leaf node. - -Verify that `len(block.body.interactive_custody_challenge_continuations) <= MAX_INTERACTIVE_CUSTODY_CHALLENGE_CONTINUATIONS`. - -For each `continuation` in `block.body.interactive_custody_challenge_continuations`, use the following function to process it: - -```python -def process_continuation(state: BeaconState, - continuation: InteractiveCustodyChallengeContinuation) -> None: - challenge = get_custody_challenge_record_by_id(state, continuation.challenge_id) - challenger = state.validator_registry[challenge.challenger_index] - responder = state.validator_registry[challenge.responder_index] - expected_depth = min(challenge_data.max_depth - challenge_data.depth, MAX_POC_RESPONSE_DEPTH) - # Verify we're not too late - assert get_current_epoch(state) < responder.withdrawable_epoch - # Verify the Merkle branch (the previous custody response provided the next level of hashes so the - # challenger has the info to make any Merkle branch) - assert verify_merkle_branch( - leaf=new_custody_tree_node, - branch=continuation.proof, - depth=expected_depth, - index=sub_index, - root=challenge_data.current_custody_tree_node - ) - # Verify signature - assert verify_signed_challenge_message(continuation, challenger.pubkey) - # Update the challenge data - challenge.current_custody_tree_node = continuation.new_custody_tree_node - challenge.depth += expected_depth - challenge.deadline = get_current_epoch(state) + MAX_POC_RESPONSE_DEPTH - responder.withdrawable_epoch = FAR_FUTURE_EPOCH - challenge.offset = challenge_data.offset * 2**expected_depth + sub_index -``` - -## Per-epoch processing - -Add the following loop immediately below the `process_ejections` loop: - -```python -def process_challenge_absences(state: BeaconState) -> None: - """ - Iterate through the challenge list - and penalize validators with balance that did not answer challenges. 
- """ - for c in state.branch_challenge_records: - if get_current_epoch(state) > c.deadline: - penalize_validator(state, c.responder_index, c.challenger_index) - - for c in state.custody_challenge_records: - if get_current_epoch(state) > c.deadline: - penalize_validator(state, c.responder_index, c.challenger_index) - if get_current_epoch(state) > state.validator_registry[c.responder_index].withdrawable_epoch: - penalize_validator(state, c.challenger_index, c.responder_index) -``` - -In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope): - -```python -def eligible(index): - validator = state.validator_registry[index] - # Cannot exit if there are still open branch challenges - if [c for c in state.branch_challenge_records if c.responder_index == index] != []: - return False - # Cannot exit if you have not revealed all of your subkeys - elif validator.next_subkey_to_reveal <= epoch_to_custody_period(validator.exit_epoch): - return False - # Cannot exit if you already have - elif validator.withdrawable_epoch < FAR_FUTURE_EPOCH: - return False - # Return minimum time - else: - return current_epoch >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWAL_EPOCHS -``` - -## One-time phase 1 initiation transition - -Run the following on the fork block after per-slot processing and before per-block and per-epoch processing. - -For all `validator` in `ValidatorRegistry`, update it to the new format and fill the new member values with: - -```python - 'next_subkey_to_reveal': get_current_custody_period(state), - 'reveal_max_periods_late': 0, -``` - -Update the `BeaconState` to the new format and fill the new member values with: - -```python - 'branch_challenge_records': [], - 'next_branch_challenge_id': 0, - 'custody_challenge_records': [], - 'next_custody_challenge_id': 0, -``` +The fork choice rule for any shard is LMD GHOST using the shard attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than a slot.) 
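The fork choice paragraph above is declarative. The sketch below is illustrative only and not part of the spec: it assumes an implementation-defined `Store` exposing `get_shard_block`, `get_children`, `get_latest_attestation_target` (the latest shard attestation target per validator) and `get_ancestor`, weights votes by `get_effective_balance`, and for brevity counts only the persistent committee's shard attestations (the crosslink committee's beacon chain attestations would be folded in the same way).

```python
def shard_lmd_ghost(store: Store, beacon_state: BeaconState, shard: Shard) -> ShardBlock:
    # Root the walk at the block referenced by the most recent accepted crosslink
    head = store.get_shard_block(beacon_state.latest_crosslinks[shard].shard_block_root)
    active_indices = get_active_validator_indices(
        beacon_state.validator_registry, get_current_epoch(beacon_state)
    )
    # Latest shard attestation target per persistent committee member, if any
    attestation_targets = []
    for validator_index in active_indices:
        target = store.get_latest_attestation_target(shard, validator_index)
        if target is not None:
            attestation_targets.append((validator_index, target))

    def get_vote_count(block: ShardBlock) -> Gwei:
        return sum(
            get_effective_balance(beacon_state, validator_index)
            for validator_index, target in attestation_targets
            if store.get_ancestor(target, block.slot) == block
        )

    while True:
        # Only consider children whose beacon_chain_root matches the canonical beacon chain
        children = store.get_children(head)
        if len(children) == 0:
            return head
        head = max(children, key=get_vote_count)
```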
From a2dae9a8e0fced5b4b57aef1944b153a6bfb0091 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 29 Mar 2019 15:26:26 +0800 Subject: [PATCH 144/481] Fix after merging --- specs/core/0_beacon-chain.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 7719b4d13..16141b399 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1381,15 +1381,19 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: # Operation is a no-op if validator is already in the queue if validator.exit_epoch == FAR_FUTURE_EPOCH: # Update exit queue counters - if state.exit_epoch < get_delayed_activation_exit_epoch(get_current_epoch(state)): - state.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + if state.exit_epoch < delayed_activation_exit_epoch: + state.exit_epoch = delayed_activation_exit_epoch + if state.exit_queue_filled >= MAX_EXIT_DEQUEUES_PER_EPOCH: state.exit_epoch += 1 state.exit_queue_filled = 0 + # Set validator exit epoch and withdrawable epoch if validator.exit_epoch > state.exit_epoch: validator.exit_epoch = state.exit_epoch validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + # Extend queue state.exit_queue_filled += 1 ``` @@ -1402,7 +1406,7 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl Slash the validator with index ``slashed_index``. Note that this function mutates ``state``. """ - initiate_validator_exit(state, index) + initiate_validator_exit(state, slashed_index) state.validator_registry[slashed_index].slashed = True state.validator_registry[slashed_index].withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH slashed_balance = get_effective_balance(state, slashed_index) @@ -1997,7 +2001,7 @@ def apply_rewards(state: BeaconState) -> None: Run `process_balance_driven_status_transitions(state)`. 
```python -def process_ejections(state: BeaconState) -> None: +def process_balance_driven_status_transitions(state: BeaconState) -> None: """ Iterate through the validator registry and deposit or eject active validators with sufficiently high or low balances @@ -2005,7 +2009,8 @@ def process_ejections(state: BeaconState) -> None: for index, validator in enumerate(state.validator_registry): balance = get_balance(state, index) if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: - state.activation_eligibility_epoch = get_current_epoch(state) + state.activation_eligibility_epoch = get_current_epoch(state) + if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: initiate_validator_exit(state, index) ``` From 00c3c1e2a6a121d77efdab576e9bdbcccae4e5a0 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 30 Mar 2019 00:24:04 +0800 Subject: [PATCH 145/481] rename eth2 pkg to pyspec, per request of hww --- .gitignore | 2 +- Makefile | 8 ++++---- .../block_processing/test_process_attestation.py | 6 +++--- .../block_processing/test_process_block_header.py | 2 +- .../phase0/block_processing/test_process_deposit.py | 4 ++-- .../block_processing/test_process_proposer_slashing.py | 4 ++-- .../phase0/block_processing/test_voluntary_exit.py | 4 ++-- py_tests/phase0/conftest.py | 2 +- py_tests/phase0/helpers.py | 8 ++++---- py_tests/phase0/test_sanity.py | 10 +++++----- scripts/phase0/build_spec.py | 4 ++-- test_generators/README.md | 2 +- test_libs/pyspec/README.md | 4 ++-- test_libs/pyspec/{eth2 => pyspec}/__init__.py | 0 test_libs/pyspec/{eth2 => pyspec}/debug/__init__.py | 0 test_libs/pyspec/{eth2 => pyspec}/debug/jsonize.py | 2 +- test_libs/pyspec/{eth2 => pyspec}/phase0/__init__.py | 0 .../pyspec/{eth2 => pyspec}/phase0/state_transition.py | 0 test_libs/pyspec/{eth2 => pyspec}/utils/__init__.py | 0 test_libs/pyspec/{eth2 => pyspec}/utils/bls_stub.py | 0 .../pyspec/{eth2 => pyspec}/utils/hash_function.py | 0 .../pyspec/{eth2 => pyspec}/utils/merkle_minimal.py | 0 test_libs/pyspec/{eth2 => pyspec}/utils/minimal_ssz.py | 0 23 files changed, 31 insertions(+), 31 deletions(-) rename test_libs/pyspec/{eth2 => pyspec}/__init__.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/debug/__init__.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/debug/jsonize.py (97%) rename test_libs/pyspec/{eth2 => pyspec}/phase0/__init__.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/phase0/state_transition.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/utils/__init__.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/utils/bls_stub.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/utils/hash_function.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/utils/merkle_minimal.py (100%) rename test_libs/pyspec/{eth2 => pyspec}/utils/minimal_ssz.py (100%) diff --git a/.gitignore b/.gitignore index b12e536ff..909996e73 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,4 @@ yaml_tests/ .pytest_cache # Dynamically built from Markdown spec -test_libs/pyspec/eth2/phase0/spec.py +test_libs/pyspec/pyspec/phase0/spec.py diff --git a/Makefile b/Makefile index 93c2e49fa..34d347118 100644 --- a/Makefile +++ b/Makefile @@ -12,18 +12,18 @@ GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/))) YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS)) GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS)) -PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2/phase0/spec.py 
+PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/pyspec/phase0/spec.py PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) .PHONY: clean all test gen_yaml_tests pyspec phase0 -all: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) $(PY_SPEC_ALL_TARGETS) +all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) clean: rm -rf $(YAML_TEST_DIR) rm -rf $(GENERATOR_VENVS) - rm -rf $(PY_TEST_DIR)/venv + rm -rf $(PY_TEST_DIR)/venv $(PY_TEST_DIR)/.pytest_cache rm -rf $(PY_SPEC_ALL_TARGETS) # "make gen_yaml_tests" to run generators @@ -40,7 +40,7 @@ pyspec: $(PY_SPEC_ALL_TARGETS) phase0: $(PY_SPEC_PHASE_0_TARGETS) -$(PY_SPEC_DIR)/eth2/phase0/spec.py: +$(PY_SPEC_DIR)/pyspec/phase0/spec.py: python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@ diff --git a/py_tests/phase0/block_processing/test_process_attestation.py b/py_tests/phase0/block_processing/test_process_attestation.py index ef6c0469b..d454d6be4 100644 --- a/py_tests/phase0/block_processing/test_process_attestation.py +++ b/py_tests/phase0/block_processing/test_process_attestation.py @@ -1,12 +1,12 @@ from copy import deepcopy import pytest -import eth2.phase0.spec as spec +import pyspec.phase0.spec as spec -from eth2.phase0.state_transition import ( +from pyspec.phase0.state_transition import ( state_transition, ) -from eth2.phase0.spec import ( +from pyspec.phase0.spec import ( get_current_epoch, process_attestation, slot_to_epoch, diff --git a/py_tests/phase0/block_processing/test_process_block_header.py b/py_tests/phase0/block_processing/test_process_block_header.py index 0c4a930c2..6c40260d5 100644 --- a/py_tests/phase0/block_processing/test_process_block_header.py +++ b/py_tests/phase0/block_processing/test_process_block_header.py @@ -2,7 +2,7 @@ from copy import deepcopy import pytest -from eth2.phase0.spec import ( +from pyspec.phase0.spec import ( get_beacon_proposer_index, cache_state, advance_slot, diff --git a/py_tests/phase0/block_processing/test_process_deposit.py b/py_tests/phase0/block_processing/test_process_deposit.py index bda014665..cf911e29a 100644 --- a/py_tests/phase0/block_processing/test_process_deposit.py +++ b/py_tests/phase0/block_processing/test_process_deposit.py @@ -1,9 +1,9 @@ from copy import deepcopy import pytest -import eth2.phase0.spec as spec +import pyspec.phase0.spec as spec -from eth2.phase0.spec import ( +from pyspec.phase0.spec import ( get_balance, ZERO_HASH, process_deposit, diff --git a/py_tests/phase0/block_processing/test_process_proposer_slashing.py b/py_tests/phase0/block_processing/test_process_proposer_slashing.py index b172cd988..3c3208b87 100644 --- a/py_tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/py_tests/phase0/block_processing/test_process_proposer_slashing.py @@ -1,8 +1,8 @@ from copy import deepcopy import pytest -import eth2.phase0.spec as spec -from eth2.phase0.spec import ( +import pyspec.phase0.spec as spec +from pyspec.phase0.spec import ( get_balance, get_current_epoch, process_proposer_slashing, diff --git a/py_tests/phase0/block_processing/test_voluntary_exit.py b/py_tests/phase0/block_processing/test_voluntary_exit.py index 32c747024..e7457126d 100644 --- a/py_tests/phase0/block_processing/test_voluntary_exit.py +++ b/py_tests/phase0/block_processing/test_voluntary_exit.py @@ -1,9 +1,9 @@ from copy import deepcopy import pytest -import eth2.phase0.spec as spec +import pyspec.phase0.spec as spec -from eth2.phase0.spec import ( +from pyspec.phase0.spec import ( get_active_validator_indices, get_current_epoch, process_voluntary_exit, diff 
--git a/py_tests/phase0/conftest.py b/py_tests/phase0/conftest.py index 0def66ad6..fb866160a 100644 --- a/py_tests/phase0/conftest.py +++ b/py_tests/phase0/conftest.py @@ -1,6 +1,6 @@ import pytest -from eth2.phase0 import spec +from pyspec.phase0 import spec from .helpers import ( create_genesis_state, diff --git a/py_tests/phase0/helpers.py b/py_tests/phase0/helpers.py index 1cf7b284b..523202361 100644 --- a/py_tests/phase0/helpers.py +++ b/py_tests/phase0/helpers.py @@ -2,9 +2,9 @@ from copy import deepcopy from py_ecc import bls -import eth2.phase0.spec as spec -from eth2.utils.minimal_ssz import signed_root -from eth2.phase0.spec import ( +import pyspec.phase0.spec as spec +from pyspec.utils.minimal_ssz import signed_root +from pyspec.phase0.spec import ( # constants EMPTY_SIGNATURE, ZERO_HASH, @@ -33,7 +33,7 @@ from eth2.phase0.spec import ( verify_merkle_branch, hash, ) -from eth2.utils.merkle_minimal import ( +from pyspec.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, diff --git a/py_tests/phase0/test_sanity.py b/py_tests/phase0/test_sanity.py index 0af65ad51..71f86d2a6 100644 --- a/py_tests/phase0/test_sanity.py +++ b/py_tests/phase0/test_sanity.py @@ -3,10 +3,10 @@ from copy import deepcopy import pytest from py_ecc import bls -import eth2.phase0.spec as spec +import pyspec.phase0.spec as spec -from eth2.utils.minimal_ssz import signed_root -from eth2.phase0.spec import ( +from pyspec.utils.minimal_ssz import signed_root +from pyspec.phase0.spec import ( # constants EMPTY_SIGNATURE, ZERO_HASH, @@ -27,10 +27,10 @@ from eth2.phase0.spec import ( verify_merkle_branch, hash, ) -from eth2.phase0.state_transition import ( +from pyspec.phase0.state_transition import ( state_transition, ) -from eth2.utils.merkle_minimal import ( +from pyspec.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index 9f33c5883..8ec57d56d 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -13,8 +13,8 @@ from typing import ( NewType, Tuple, ) -from eth2.utils.minimal_ssz import * -from eth2.utils.bls_stub import * +from pyspec.utils.minimal_ssz import * +from pyspec.utils.bls_stub import * """) diff --git a/test_generators/README.md b/test_generators/README.md index f4dca7977..51cca6561 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -113,7 +113,7 @@ if __name__ == "__main__": And to use the pyspec: ``` -from eth2.phase0 import spec +from pyspec.phase0 import spec ``` Recommendations: diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index ee6bc5325..11ffa835a 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -13,13 +13,13 @@ All the dynamic parts of the spec can be build at once with `make pyspec`. Alternatively, you can build a sub-set of the pyspec: `make phase0`. -Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2/phase0/spec.py` +Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/pyspec/phase0/spec.py` ## Contributing Contributions are welcome, but consider implementing your idea as part of the spec itself first. The pyspec is not a replacement. -If you see opportunity to include any of the `eth2/utils/` code in the spec, +If you see opportunity to include any of the `pyspec/utils/` code in the spec, please submit an issue or PR. 
## License diff --git a/test_libs/pyspec/eth2/__init__.py b/test_libs/pyspec/pyspec/__init__.py similarity index 100% rename from test_libs/pyspec/eth2/__init__.py rename to test_libs/pyspec/pyspec/__init__.py diff --git a/test_libs/pyspec/eth2/debug/__init__.py b/test_libs/pyspec/pyspec/debug/__init__.py similarity index 100% rename from test_libs/pyspec/eth2/debug/__init__.py rename to test_libs/pyspec/pyspec/debug/__init__.py diff --git a/test_libs/pyspec/eth2/debug/jsonize.py b/test_libs/pyspec/pyspec/debug/jsonize.py similarity index 97% rename from test_libs/pyspec/eth2/debug/jsonize.py rename to test_libs/pyspec/pyspec/debug/jsonize.py index 660e6b070..a77684543 100644 --- a/test_libs/pyspec/eth2/debug/jsonize.py +++ b/test_libs/pyspec/pyspec/debug/jsonize.py @@ -1,4 +1,4 @@ -from eth2.utils.minimal_ssz import hash_tree_root +from pyspec.utils.minimal_ssz import hash_tree_root def jsonize(value, typ, include_hash_tree_roots=False): diff --git a/test_libs/pyspec/eth2/phase0/__init__.py b/test_libs/pyspec/pyspec/phase0/__init__.py similarity index 100% rename from test_libs/pyspec/eth2/phase0/__init__.py rename to test_libs/pyspec/pyspec/phase0/__init__.py diff --git a/test_libs/pyspec/eth2/phase0/state_transition.py b/test_libs/pyspec/pyspec/phase0/state_transition.py similarity index 100% rename from test_libs/pyspec/eth2/phase0/state_transition.py rename to test_libs/pyspec/pyspec/phase0/state_transition.py diff --git a/test_libs/pyspec/eth2/utils/__init__.py b/test_libs/pyspec/pyspec/utils/__init__.py similarity index 100% rename from test_libs/pyspec/eth2/utils/__init__.py rename to test_libs/pyspec/pyspec/utils/__init__.py diff --git a/test_libs/pyspec/eth2/utils/bls_stub.py b/test_libs/pyspec/pyspec/utils/bls_stub.py similarity index 100% rename from test_libs/pyspec/eth2/utils/bls_stub.py rename to test_libs/pyspec/pyspec/utils/bls_stub.py diff --git a/test_libs/pyspec/eth2/utils/hash_function.py b/test_libs/pyspec/pyspec/utils/hash_function.py similarity index 100% rename from test_libs/pyspec/eth2/utils/hash_function.py rename to test_libs/pyspec/pyspec/utils/hash_function.py diff --git a/test_libs/pyspec/eth2/utils/merkle_minimal.py b/test_libs/pyspec/pyspec/utils/merkle_minimal.py similarity index 100% rename from test_libs/pyspec/eth2/utils/merkle_minimal.py rename to test_libs/pyspec/pyspec/utils/merkle_minimal.py diff --git a/test_libs/pyspec/eth2/utils/minimal_ssz.py b/test_libs/pyspec/pyspec/utils/minimal_ssz.py similarity index 100% rename from test_libs/pyspec/eth2/utils/minimal_ssz.py rename to test_libs/pyspec/pyspec/utils/minimal_ssz.py From 6f0f2a8f5373f94652f45ce3f36af47fa01ab6b8 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Sat, 30 Mar 2019 16:21:09 -0700 Subject: [PATCH 146/481] Update the descriptive text to refer to the correct type of root --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d08828692..629d2990c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -942,7 +942,7 @@ def get_block_root(state: BeaconState, return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] ``` -`get_block_root(_, s)` should always return `hash_tree_root` of the block in the beacon chain at slot `s`, and `get_crosslink_committees_at_slot(_, s)` should not change unless the [validator](#dfn-validator) registry changes. 
+`get_block_root(_, s)` should always return `signed_root` of the block in the beacon chain at slot `s`, and `get_crosslink_committees_at_slot(_, s)` should not change unless the [validator](#dfn-validator) registry changes. ### `get_state_root` From 15498f22ef5c4ea9afb371a90e74040bb7e08c48 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 30 Mar 2019 19:26:44 -0500 Subject: [PATCH 147/481] Fixed exit epoch conditional --- specs/core/0_beacon-chain.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 16141b399..e5102c56d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1390,9 +1390,8 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: state.exit_queue_filled = 0 # Set validator exit epoch and withdrawable epoch - if validator.exit_epoch > state.exit_epoch: - validator.exit_epoch = state.exit_epoch - validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + validator.exit_epoch = state.exit_epoch + validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY # Extend queue state.exit_queue_filled += 1 From e4c3c556d5bd820698896c76325ac5013351102f Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 31 Mar 2019 09:02:10 +0400 Subject: [PATCH 148/481] Rename "transaction" to "operation" Fix #822. --- specs/core/0_beacon-chain.md | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d08828692..5be13cdb5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -18,7 +18,7 @@ - [Time parameters](#time-parameters) - [State list lengths](#state-list-lengths) - [Reward and penalty quotients](#reward-and-penalty-quotients) - - [Max transactions per block](#max-transactions-per-block) + - [Max operations per block](#max-operations-per-block) - [Signature domains](#signature-domains) - [Data structures](#data-structures) - [Misc dependencies](#misc-dependencies) @@ -34,7 +34,7 @@ - [`Validator`](#validator) - [`PendingAttestation`](#pendingattestation) - [`HistoricalBatch`](#historicalbatch) - - [Beacon transactions](#beacon-transactions) + - [Beacon operations](#beacon-operations) - [`ProposerSlashing`](#proposerslashing) - [`AttesterSlashing`](#attesterslashing) - [`Attestation`](#attestation) @@ -132,7 +132,7 @@ - [Block header](#block-header) - [RANDAO](#randao) - [Eth1 data](#eth1-data-1) - - [Transactions](#transactions) + - [Operations](#operations) - [Proposer slashings](#proposer-slashings) - [Attester slashings](#attester-slashings) - [Attestations](#attestations) @@ -261,8 +261,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. * The `BASE_REWARD_QUOTIENT` parameter dictates the per-epoch reward. It corresponds to ~2.54% annual interest assuming 10 million participating ETH in every epoch. * The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. 
Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. - -### Max transactions per block +### Max operations per block | Name | Value | | - | - | @@ -460,7 +459,7 @@ The types are defined topologically to aid in facilitating an executable version } ``` -### Beacon transactions +### Beacon operations #### `ProposerSlashing` @@ -2234,7 +2233,7 @@ def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: state.eth1_data_votes.append(Eth1DataVote(eth1_data=block.body.eth1_data, vote_count=1)) ``` -#### Transactions +#### Operations ##### Proposer slashings @@ -2246,7 +2245,7 @@ For each `proposer_slashing` in `block.body.proposer_slashings`, run the followi def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None: """ - Process ``ProposerSlashing`` transaction. + Process ``ProposerSlashing`` operation. Note that this function mutates ``state``. """ proposer = state.validator_registry[proposer_slashing.proposer_index] @@ -2277,7 +2276,7 @@ For each `attester_slashing` in `block.body.attester_slashings`, run the followi def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None: """ - Process ``AttesterSlashing`` transaction. + Process ``AttesterSlashing`` operation. Note that this function mutates ``state``. """ attestation1 = attester_slashing.attestation_1 @@ -2312,7 +2311,7 @@ For each `attestation` in `block.body.attestations`, run the following function: ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: """ - Process ``Attestation`` transaction. + Process ``Attestation`` operation. Note that this function mutates ``state``. """ assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot @@ -2367,7 +2366,7 @@ For each `exit` in `block.body.voluntary_exits`, run the following function: ```python def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: """ - Process ``VoluntaryExit`` transaction. + Process ``VoluntaryExit`` operation. Note that this function mutates ``state``. """ validator = state.validator_registry[exit.validator_index] @@ -2403,7 +2402,7 @@ For each `transfer` in `block.body.transfers`, run the following function: ```python def process_transfer(state: BeaconState, transfer: Transfer) -> None: """ - Process ``Transfer`` transaction. + Process ``Transfer`` operation. Note that this function mutates ``state``. """ # Verify the amount and fee aren't individually too big (for anti-overflow purposes) From b6be9e1830f9fff278c2d247cfce3d843be8e8f7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 31 Mar 2019 04:55:24 -0500 Subject: [PATCH 149/481] Possible aesthetic rework to get_domain In general I dislike how domains, which should be an unobtrusive out-of-the-way thing that we don't think about much, are taking up so much space in code to express, to the point of them being the single thing preventing `bls_verify` from being expressed in one line of code. Here I reorder arguments and add a default, and make `bls_verify` a one-liner. Not necessarily convinced that exactly this approach is the way to go, but IMO it's worth considering. 
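For readers of the diff that follows, this is roughly what the reworked helper looks like when the proposed default is written with standard Python default-argument syntax (`epoch: Epoch=None`). It is a sketch of the idea described above, not normative text, and it reads the fork from `state.fork` since only `state` is passed in:

```python
def get_domain(state: BeaconState,
               domain_type: int,
               epoch: Epoch=None) -> int:
    """
    Return the signature domain for ``domain_type``, defaulting to the current epoch.
    """
    message_epoch = get_current_epoch(state) if epoch is None else epoch
    return bytes_to_int(get_fork_version(state.fork, message_epoch) + int_to_bytes4(domain_type))

# With the default in place, most verifications collapse to one line, e.g.:
# bls_verify(proposer.pubkey, signed_root(block), block.signature, get_domain(state, DOMAIN_BEACON_BLOCK))
```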
--- specs/core/0_beacon-chain.md | 60 +++++++++--------------------------- 1 file changed, 14 insertions(+), 46 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d08828692..6e2ebbcba 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1121,13 +1121,14 @@ def get_fork_version(fork: Fork, ### `get_domain` ```python -def get_domain(fork: Fork, - epoch: Epoch, - domain_type: int) -> int: +def get_domain(state: BeaconState, + domain_type: int, + epoch=None: int) -> int: """ Get the domain number that represents the fork meta and signature domain. """ - return bytes_to_int(get_fork_version(fork, epoch) + int_to_bytes4(domain_type)) + epoch_of_message = get_current_epoch(state) if epoch is None else epoch + return bytes_to_int(get_fork_version(fork, epoch_of_message) + int_to_bytes4(domain_type)) ``` ### `get_bitfield_bit` @@ -1210,7 +1211,7 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)), ], signature=indexed_attestation.aggregate_signature, - domain=get_domain(state.fork, slot_to_epoch(indexed_attestation.data.slot), DOMAIN_ATTESTATION), + domain=get_domain(state, DOMAIN_ATTESTATION, slot_to_epoch(indexed_attestation.data.slot)), ) ``` @@ -1316,17 +1317,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: if pubkey not in validator_pubkeys: # Verify the proof of possession - proof_is_valid = bls_verify( - pubkey=pubkey, - message_hash=signed_root(deposit.data), - signature=deposit.data.proof_of_possession, - domain=get_domain( - state.fork, - get_current_epoch(state), - DOMAIN_DEPOSIT, - ) - ) - if not proof_is_valid: + if not bls_verify(pubkey, signed_root(deposit.data), deposit.data.proof_of_possession, get_domain(state, DOMAIN_DEPOSIT)): return # Add new validator @@ -2194,12 +2185,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] assert not proposer.slashed # Verify proposer signature - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=signed_root(block), - signature=block.signature, - domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_BEACON_BLOCK) - ) + assert bls_verify(proposer.pubkey, signed_root(block), block.signature, get_domain(state, DOMAIN_BEACON_BLOCK)) ``` #### RANDAO @@ -2208,12 +2194,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: def process_randao(state: BeaconState, block: BeaconBlock) -> None: proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] # Verify that the provided randao value is valid - assert bls_verify( - pubkey=proposer.pubkey, - message_hash=hash_tree_root(get_current_epoch(state)), - signature=block.body.randao_reveal, - domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO) - ) + assert bls_verify(proposer.pubkey, hash_tree_root(get_current_epoch(state)), block.body.randao_reveal, get_domain(state, DOMAIN_RANDAO)) # Mix it in state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( xor(get_randao_mix(state, get_current_epoch(state)), @@ -2258,12 +2239,8 @@ def process_proposer_slashing(state: BeaconState, assert is_slashable_validator(proposer, get_current_epoch(state)) # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): - assert bls_verify( - pubkey=proposer.pubkey, 
- message_hash=signed_root(header), - signature=header.signature, - domain=get_domain(state.fork, slot_to_epoch(header.slot), DOMAIN_BEACON_BLOCK) - ) + domain = get_domain(state, DOMAIN_BEACON_BLOCK, slot_to_epoch(header.slot)) + assert bls_verify(proposer.pubkey, signed_root(header), header.signature, domain) slash_validator(state, proposer_slashing.proposer_index) ``` @@ -2382,12 +2359,8 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: # Verify the validator has been active long enough assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature - assert bls_verify( - pubkey=validator.pubkey, - message_hash=signed_root(exit), - signature=exit.signature, - domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) - ) + domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch) + assert bls_verify(validator.pubkey, signed_root(exit), exit.signature, domain) # Initiate exit initiate_validator_exit(state, exit.validator_index) ``` @@ -2427,12 +2400,7 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:] ) # Verify that the signature is valid - assert bls_verify( - pubkey=transfer.pubkey, - message_hash=signed_root(transfer), - signature=transfer.signature, - domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) - ) + assert bls_verify(transfer.pubkey, signed_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER)) # Process the transfer decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) increase_balance(state, transfer.recipient, transfer.amount) From 77b0a4188bf573c3174db26d36a0e2348a33b5a2 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 31 Mar 2019 22:54:46 +0800 Subject: [PATCH 150/481] Update `1_custody-game.md` as well --- specs/core/1_custody-game.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index fd754634e..e28536d34 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -13,7 +13,7 @@ - [Constants](#constants) - [Misc](#misc) - [Time parameters](#time-parameters) - - [Max transactions per block](#max-transactions-per-block) + - [Max operations per block](#max-operations-per-block) - [Signature domains](#signature-domains) - [Data structures](#data-structures) - [Custody objects](#custody-objects) @@ -33,7 +33,7 @@ - [`epoch_to_custody_period`](#epoch_to_custody_period) - [`verify_custody_key`](#verify_custody_key) - [Per-block processing](#per-block-processing) - - [Transactions](#transactions) + - [Operations](#operations) - [Custody reveals](#custody-reveals) - [Chunk challenges](#chunk-challenges) - [Bit challenges](#bit-challenges) @@ -79,7 +79,7 @@ This document details the beacon chain additions and changes in Phase 1 of Ether | `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | | `CUSTODY_RESPONSE_DEADLINE` | `2**14` (= 16,384) | epochs | ~73 days | -### Max transactions per block +### Max operations per block | Name | Value | | - | - | @@ -259,9 +259,9 @@ def verify_custody_key(state: BeaconState, reveal: CustodyKeyReveal) -> bool: ## Per-block processing -### Transactions +### Operations -Add the following transactions to the per-block processing, in order the given below and after all other transactions in phase 0. 
+Add the following operations to the per-block processing, in order the given below and after all other operations in phase 0. #### Custody reveals From 93540f9662a94d23bb993765adf8df1b24ad3346 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 31 Mar 2019 23:00:21 +0800 Subject: [PATCH 151/481] Rename transaction to operation in tests --- utils/phase0/state_transition.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py index cfd941c42..2c420014f 100644 --- a/utils/phase0/state_transition.py +++ b/utils/phase0/state_transition.py @@ -22,31 +22,31 @@ def expected_deposit_count(state: BeaconState) -> int: ) -def process_transaction_type(state: BeaconState, - transactions: List[Any], - max_transactions: int, - tx_fn: Callable[[BeaconState, Any], None]) -> None: - assert len(transactions) <= max_transactions - for transaction in transactions: - tx_fn(state, transaction) +def process_operation_type(state: BeaconState, + operations: List[Any], + max_operations: int, + tx_fn: Callable[[BeaconState, Any], None]) -> None: + assert len(operations) <= max_operations + for operation in operations: + tx_fn(state, operation) -def process_transactions(state: BeaconState, block: BeaconBlock) -> None: - process_transaction_type( +def process_operations(state: BeaconState, block: BeaconBlock) -> None: + process_operation_type( state, block.body.proposer_slashings, spec.MAX_PROPOSER_SLASHINGS, spec.process_proposer_slashing, ) - process_transaction_type( + process_operation_type( state, block.body.attester_slashings, spec.MAX_ATTESTER_SLASHINGS, spec.process_attester_slashing, ) - process_transaction_type( + process_operation_type( state, block.body.attestations, spec.MAX_ATTESTATIONS, @@ -54,14 +54,14 @@ def process_transactions(state: BeaconState, block: BeaconBlock) -> None: ) assert len(block.body.deposits) == expected_deposit_count(state) - process_transaction_type( + process_operation_type( state, block.body.deposits, spec.MAX_DEPOSITS, spec.process_deposit, ) - process_transaction_type( + process_operation_type( state, block.body.voluntary_exits, spec.MAX_VOLUNTARY_EXITS, @@ -69,7 +69,7 @@ def process_transactions(state: BeaconState, block: BeaconBlock) -> None: ) assert len(block.body.transfers) == len(set(block.body.transfers)) - process_transaction_type( + process_operation_type( state, block.body.transfers, spec.MAX_TRANSFERS, @@ -84,7 +84,7 @@ def process_block(state: BeaconState, spec.process_randao(state, block) spec.process_eth1_data(state, block) - process_transactions(state, block) + process_operations(state, block) if verify_state_root: spec.verify_block_state_root(state, block) From 79d1f9fb76ae0c82a9e34159ddca63b59ab00fc6 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 31 Mar 2019 20:45:57 +0400 Subject: [PATCH 152/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6e2ebbcba..49bbe4d9a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -396,7 +396,7 @@ The types are defined topologically to aid in facilitating an executable version # Amount in Gwei 'amount': 'uint64', # Container self-signature - 'proof_of_possession': 'bytes96', + 'signature': 'bytes96', } ``` @@ -1123,7 +1123,7 @@ def get_fork_version(fork: Fork, ```python def get_domain(state: BeaconState, domain_type: int, - 
epoch=None: int) -> int: + epoch: int=None) -> int: """ Get the domain number that represents the fork meta and signature domain. """ @@ -1316,8 +1316,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: amount = deposit.data.amount if pubkey not in validator_pubkeys: - # Verify the proof of possession - if not bls_verify(pubkey, signed_root(deposit.data), deposit.data.proof_of_possession, get_domain(state, DOMAIN_DEPOSIT)): + # Verify the deposit signature (proof of possession) + if not bls_verify(pubkey, signed_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)): return # Add new validator From c2edcebee3dedf6f6b143cfce9a8c03f6654b6be Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 31 Mar 2019 20:48:44 +0400 Subject: [PATCH 153/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 49bbe4d9a..0330ef1f0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -277,7 +277,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | Name | Value | | - | - | -| `DOMAIN_BEACON_BLOCK` | `0` | +| `DOMAIN_BEACON_PROPOSER` | `0` | | `DOMAIN_RANDAO` | `1` | | `DOMAIN_ATTESTATION` | `2` | | `DOMAIN_DEPOSIT` | `3` | @@ -2185,7 +2185,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] assert not proposer.slashed # Verify proposer signature - assert bls_verify(proposer.pubkey, signed_root(block), block.signature, get_domain(state, DOMAIN_BEACON_BLOCK)) + assert bls_verify(proposer.pubkey, signed_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER)) ``` #### RANDAO @@ -2239,7 +2239,7 @@ def process_proposer_slashing(state: BeaconState, assert is_slashable_validator(proposer, get_current_epoch(state)) # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): - domain = get_domain(state, DOMAIN_BEACON_BLOCK, slot_to_epoch(header.slot)) + domain = get_domain(state, DOMAIN_BEACON_PROPOSER, slot_to_epoch(header.slot)) assert bls_verify(proposer.pubkey, signed_root(header), header.signature, domain) slash_validator(state, proposer_slashing.proposer_index) ``` From bb990e9892ef078f5411835022c93ec52518de7d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 31 Mar 2019 17:49:02 -0500 Subject: [PATCH 154/481] Separate out get_switchover_epoch So that we can use it in for subkey reveals as well. 
--- specs/core/1_shard-data-chains.md | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 8f2d12a91..6452a37cd 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -20,6 +20,7 @@ - [`ShardAttestation`](#shardattestation) - [Helper functions](#helper-functions) - [`get_period_committee`](#get_period_committee) + - [`get_switchover_epoch`](#get_switchover_epoch) - [`get_persistent_committee`](#get_persistent_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) - [`get_shard_header`](#get_shard_header) @@ -137,6 +138,14 @@ def get_period_committee(state: BeaconState, ) ``` +### `get_switchover_epoch` + +```python +def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex): + earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 + return bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD +``` + ### `get_persistent_committee` ```python @@ -146,6 +155,7 @@ def get_persistent_committee(state: BeaconState, """ Return the persistent committee for the given ``shard`` at the given ``slot``. """ + epoch = slot_to_epoch(epoch) earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD @@ -160,14 +170,11 @@ def get_persistent_committee(state: BeaconState, earlier_committee = get_period_committee(state, shard, earlier_start_epoch, index, committee_count) later_committee = get_period_committee(state, shard, later_start_epoch, index, committee_count) - def get_switchover_epoch(index): - return bytes_to_int(hash(earlier_seed + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD - # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated return sorted(list(set( - [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + - [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] + [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(state, epoch, i)] + + [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(state, epoch, i)] ))) ``` From 3fc24f3d415280484ff1c99813d060d39b242983 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 31 Mar 2019 21:20:43 -0500 Subject: [PATCH 155/481] Replace with empty instead of popping finished challenges --- specs/core/1_custody-game.md | 44 +++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index fd754634e..41ba9d953 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -31,6 +31,7 @@ - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) - [`get_custody_chunk_bit`](#get_custody_chunk_bit) - [`epoch_to_custody_period`](#epoch_to_custody_period) + - [`replace_empty_or_append`](#replace_empty_or_append) - [`verify_custody_key`](#verify_custody_key) - [Per-block processing](#per-block-processing) - [Transactions](#transactions) @@ -229,6 +230,18 @@ def epoch_to_custody_period(epoch: Epoch) -> int: return epoch // EPOCHS_PER_CUSTODY_PERIOD ``` +### 
`replace_empty_or_append` + +```python +def replace_empty_or_append(list: List[Any], empty_element: Any, new_element: Any) -> int: + for i in range(len(list)): + if list[i] == empty_element: + list[i] = new_element + return i + list.append(new_element) + return len(list) - 1 +``` + ### `verify_custody_key` ```python @@ -321,7 +334,7 @@ def process_chunk_challenge(state: BeaconState, depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation))) assert challenge.chunk_index < 2**depth # Add new chunk challenge record - state.custody_chunk_challenge_records.append(CustodyChunkChallengeRecord( + new_record = CustodyChunkChallengeRecord( challenge_index=state.custody_challenge_index, challenger_index=get_beacon_proposer_index(state, state.slot), responder_index=challenge.responder_index @@ -329,7 +342,13 @@ def process_chunk_challenge(state: BeaconState, crosslink_data_root=challenge.attestation.data.crosslink_data_root, depth=depth, chunk_index=challenge.chunk_index, - )) + ) + replace_empty_or_append( + list=state.custody_chunk_challenge_records, + empty_element=CustodyChunkChallengeRecord(), + new_element=new_record + ) + state.custody_challenge_index += 1 # Postpone responder withdrawability responder.withdrawable_epoch = FAR_FUTURE_EPOCH @@ -385,7 +404,7 @@ def process_bit_challenge(state: BeaconState, custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index)) assert custody_bit != chunk_bits_xor # Add new bit challenge record - state.custody_bit_challenge_records.append(CustodyBitChallengeRecord( + new_record = CustodyBitChallengeRecord( challenge_index=state.custody_challenge_index, challenger_index=challenge.challenger_index, responder_index=challenge.responder_index, @@ -393,7 +412,12 @@ def process_bit_challenge(state: BeaconState, crosslink_data_root=challenge.attestation.crosslink_data_root, chunk_bits=challenge.chunk_bits, responder_key=challenge.responder_key, - )) + ) + replace_empty_or_append( + list=state.custody_bit_challenge_records, + empty_element=CustodyBitChallengeRecord(), + new_element=new_record + ) state.custody_challenge_index += 1 # Postpone responder withdrawability responder.withdrawable_epoch = FAR_FUTURE_EPOCH @@ -434,7 +458,8 @@ def process_chunk_challenge_response(state: BeaconState, root=challenge.crosslink_data_root, ) # Clear the challenge - state.custody_chunk_challenge_records.remove(challenge) + records = state.custody_chunk_challenge_records + records[records.index(challenge)] = CustodyChunkChallengeRecord() # Reward the proposer proposer_index = get_beacon_proposer_index(state, state.slot) increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) @@ -457,7 +482,8 @@ def process_bit_challenge_response(state: BeaconState, # Verify the chunk bit does not match the challenge chunk bit assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits, response.chunk_index) # Clear the challenge - state.custody_bit_challenge_records.remove(challenge) + records = state.custody_bit_challenge_records + records[records.index(challenge)] = CustodyBitChallengeRecord() # Slash challenger slash_validator(state, challenge.challenger_index, challenge.responder_index) ``` @@ -471,12 +497,14 @@ def process_challenge_deadlines(state: BeaconState) -> None: for challenge in state.custody_chunk_challenge_records: if get_current_epoch(state) > challenge.deadline: slash_validator(state, challenge.responder_index, challenge.challenger_index) 
- state.custody_chunk_challenge_records.remove(challenge) + records = state.custody_chunk_challenge_records + records[records.index(challenge)] = CustodyChunkChallengeRecord() for challenge in state.custody_bit_challenge_records: if get_current_epoch(state) > challenge.deadline: slash_validator(state, challenge.responder_index, challenge.challenger_index) - state.custody_bit_challenge_records.remove(challenge) + records = state.custody_bit_challenge_records + records[records.index(challenge)] = CustodyBitChallengeRecord() ``` In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope): From 06d005999a3b8ebcde35ed89593e6efb573f3afc Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 2 Apr 2019 13:50:06 +1100 Subject: [PATCH 156/481] fix validator_indicies issue in process_attester_slashing --- specs/core/0_beacon-chain.md | 6 ++++-- tests/phase0/helpers.py | 13 +++++++++++++ tests/phase0/test_sanity.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d08828692..ced7a7210 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2291,10 +2291,12 @@ def process_attester_slashing(state: BeaconState, assert verify_indexed_attestation(state, attestation1) assert verify_indexed_attestation(state, attestation2) + validator_indices_1 = attestation1.custody_bit_0_indices + attestation1.custody_bit_1_indices + validator_indices_2 = attestation2.custody_bit_0_indices + attestation2.custody_bit_1_indices slashable_indices = [ - index for index in attestation1.validator_indices + index for index in validator_indices_1 if ( - index in attestation2.validator_indices and + index in validator_indices_2 and is_slashable_validator(state.validator_registry[index], get_current_epoch(state)) ) ] diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index e5e335d80..33f394def 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -12,6 +12,7 @@ from build.phase0.spec import ( Attestation, AttestationData, AttestationDataAndCustodyBit, + AttesterSlashing, BeaconBlockHeader, Deposit, DepositData, @@ -19,6 +20,7 @@ from build.phase0.spec import ( ProposerSlashing, VoluntaryExit, # functions + convert_to_indexed, get_active_validator_indices, get_attestation_participants, get_block_root, @@ -244,6 +246,17 @@ def get_valid_proposer_slashing(state): ) +def get_valid_attester_slashing(state): + attestation_1 = get_valid_attestation(state) + attestation_2 = deepcopy(attestation_1) + attestation_2.data.target_root = b'\x01'*32 + + return AttesterSlashing( + attestation_1=convert_to_indexed(state, attestation_1), + attestation_2=convert_to_indexed(state, attestation_2), + ) + + def get_valid_attestation(state, slot=None): if slot is None: slot = state.slot diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 3b4497ca5..8e6bd2e94 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -40,6 +40,7 @@ from tests.phase0.helpers import ( build_empty_block_for_next_slot, force_registry_change_at_next_epoch, get_valid_attestation, + get_valid_attester_slashing, get_valid_proposer_slashing, privkeys, pubkeys, @@ -140,6 +141,33 @@ def test_proposer_slashing(state): return state, [block], test_state +def test_attester_slashing(state): + test_state = deepcopy(state) + attester_slashing = 
get_valid_attester_slashing(state) + validator_index = attester_slashing.attestation_1.custody_bit_0_indices[0] + + # + # Add to state via block transition + # + block = build_empty_block_for_next_slot(test_state) + block.body.attester_slashings.append(attester_slashing) + state_transition(test_state, block) + + assert not state.validator_registry[validator_index].initiated_exit + assert not state.validator_registry[validator_index].slashed + + slashed_validator = test_state.validator_registry[validator_index] + assert not slashed_validator.initiated_exit + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + # lost whistleblower reward + assert get_balance(test_state, validator_index) < get_balance(state, validator_index) + + return state, [block], test_state + + + def test_deposit_in_block(state): pre_state = deepcopy(state) test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) From e037412f94d7c24477cd7723470e1735b866d920 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 2 Apr 2019 14:04:04 +1100 Subject: [PATCH 157/481] add process attester slashing tests --- .../test_process_attester_slashing.py | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 tests/phase0/block_processing/test_process_attester_slashing.py diff --git a/tests/phase0/block_processing/test_process_attester_slashing.py b/tests/phase0/block_processing/test_process_attester_slashing.py new file mode 100644 index 000000000..ed6d9adae --- /dev/null +++ b/tests/phase0/block_processing/test_process_attester_slashing.py @@ -0,0 +1,97 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec +from build.phase0.spec import ( + get_balance, + get_current_epoch, + process_attester_slashing, +) +from tests.phase0.helpers import ( + get_valid_attester_slashing, +) + +# mark entire file as 'attester_slashing' +pytestmark = pytest.mark.attester_slashings + + +def run_attester_slashing_processing(state, attester_slashing, valid=True): + """ + Run ``process_attester_slashing`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_attester_slashing(post_state, attester_slashing) + return state, None + + process_attester_slashing(post_state, attester_slashing) + + validator_index = attester_slashing.attestation_1.custody_bit_0_indices[0] + slashed_validator = post_state.validator_registry[validator_index] + assert not slashed_validator.initiated_exit + assert slashed_validator.slashed + assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH + assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH + # lost whistleblower reward + assert ( + get_balance(post_state, validator_index) < + get_balance(state, validator_index) + ) + + return state, post_state + + +def test_success_double(state): + attester_slashing = get_valid_attester_slashing(state) + + pre_state, post_state = run_attester_slashing_processing(state, attester_slashing) + + return pre_state, attester_slashing, post_state + + +def test_success_surround(state): + attester_slashing = get_valid_attester_slashing(state) + + # set attestion1 to surround attestation 2 + attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1 + attester_slashing.attestation_1.data.slot = attester_slashing.attestation_2.data.slot + spec.SLOTS_PER_EPOCH + + pre_state, post_state = run_attester_slashing_processing(state, attester_slashing) + + return pre_state, attester_slashing, post_state + + +def test_same_data(state): + attester_slashing = get_valid_attester_slashing(state) + + attester_slashing.attestation_1.data = attester_slashing.attestation_2.data + + pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False) + + return pre_state, attester_slashing, post_state + + +def test_no_double_or_surround(state): + attester_slashing = get_valid_attester_slashing(state) + + attester_slashing.attestation_1.data.slot += spec.SLOTS_PER_EPOCH + + pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False) + + return pre_state, attester_slashing, post_state + +def test_participants_already_slashed(state): + attester_slashing = get_valid_attester_slashing(state) + + # set all indices to slashed + attestation_1 = attester_slashing.attestation_1 + validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices + for index in validator_indices: + state.validator_registry[index].slashed = True + + pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False) + + return pre_state, attester_slashing, post_state From 577fc740d09dcbb66848e84dbb141b75ce6952df Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 2 Apr 2019 14:08:22 +1100 Subject: [PATCH 158/481] lint --- tests/phase0/block_processing/test_process_attester_slashing.py | 1 + tests/phase0/test_sanity.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/phase0/block_processing/test_process_attester_slashing.py b/tests/phase0/block_processing/test_process_attester_slashing.py index ed6d9adae..fcbc8f78b 100644 --- a/tests/phase0/block_processing/test_process_attester_slashing.py +++ b/tests/phase0/block_processing/test_process_attester_slashing.py @@ -83,6 +83,7 @@ def test_no_double_or_surround(state): return pre_state, attester_slashing, post_state + def test_participants_already_slashed(state): attester_slashing = get_valid_attester_slashing(state) diff --git 
a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 8e6bd2e94..0010cb22f 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -167,7 +167,6 @@ def test_attester_slashing(state): return state, [block], test_state - def test_deposit_in_block(state): pre_state = deepcopy(state) test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) From 529cf4223e221bb8e782f76eb7b0c22aa051e7bc Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 2 Apr 2019 16:00:36 +1100 Subject: [PATCH 159/481] add previous and current crosslinks --- specs/core/0_beacon-chain.md | 38 ++++++++++--------- .../test_process_attestation.py | 2 +- .../test_process_proposer_slashing.py | 2 +- tests/phase0/conftest.py | 3 +- tests/phase0/helpers.py | 7 +--- tests/phase0/test_sanity.py | 6 ++- 6 files changed, 31 insertions(+), 27 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d08828692..1032a905a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -610,7 +610,8 @@ The types are defined topologically to aid in facilitating an executable version 'finalized_root': 'bytes32', # Recent state - 'latest_crosslinks': [Crosslink, SHARD_COUNT], + 'current_crosslinks': [Crosslink, SHARD_COUNT], + 'previous_crosslinks': [Crosslink, SHARD_COUNT], 'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], 'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT], 'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH], @@ -1554,7 +1555,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], finalized_root=ZERO_HASH, # Recent state - latest_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + current_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + previous_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), @@ -1758,10 +1760,14 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe **Note**: Total balances computed for the previous epoch might be marginally different than the actual total balances during the previous epoch transition. Due to the tight bound on validator churn each epoch and small per-epoch rewards/penalties, the potential balance difference is very low and only marginally affects consensus safety. 
```python -def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: +def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: all_attestations = state.current_epoch_attestations + state.previous_epoch_attestations + crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks + valid_attestations = [ - a for a in all_attestations if a.data.previous_crosslink == state.latest_crosslinks[shard] + a for a in all_attestations + if a.data.previous_crosslink == crosslinks[shard] and + a.data.shard == shard and a.data.slot == slot ] all_roots = [a.data.crosslink_data_root for a in valid_attestations] @@ -1856,16 +1862,20 @@ def process_crosslinks(state: BeaconState) -> None: current_epoch = get_current_epoch(state) previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) next_epoch = current_epoch + 1 + next_previous_crosslinks = [crosslink for crosslink in state.current_crosslinks] + for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) + winning_root, participants = get_winning_root_and_participants(state, slot, shard) participating_balance = get_total_balance(state, participants) total_balance = get_total_balance(state, crosslink_committee) if 3 * participating_balance >= 2 * total_balance: - state.latest_crosslinks[shard] = Crosslink( - epoch=min(slot_to_epoch(slot), state.latest_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), - crosslink_data_root=winning_root + state.current_crosslinks[shard] = Crosslink( + epoch=min(slot_to_epoch(slot), state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), + crosslink_data_root=winning_root, ) + + state.previous_crosslinks = next_previous_crosslinks ``` #### Eth1 data @@ -1972,7 +1982,7 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) for slot in range(previous_epoch_start_slot, current_epoch_start_slot): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) + winning_root, participants = get_winning_root_and_participants(state, slot, shard) participating_balance = get_total_balance(state, participants) total_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: @@ -2327,14 +2337,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Check crosslink data assert attestation.data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] - assert state.latest_crosslinks[attestation.data.shard] in { - attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink - Crosslink( # Case 2: latest crosslink matches current crosslink - crosslink_data_root=attestation.data.crosslink_data_root, - epoch=min(slot_to_epoch(attestation.data.slot), - attestation.data.previous_crosslink.epoch + MAX_CROSSLINK_EPOCHS) - ), - } + crosslinks = state.current_crosslinks if slot_to_epoch(attestation.data.slot) == get_current_epoch(state) else state.previous_crosslinks + assert crosslinks[attestation.data.shard] == attestation.data.previous_crosslink # Check signature and bitfields assert 
verify_indexed_attestation(state, convert_to_indexed(state, attestation)) diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index ca6933ce7..c946feb05 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -124,7 +124,7 @@ def test_bad_previous_crosslink(state): attestation = get_valid_attestation(state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - state.latest_crosslinks[attestation.data.shard].epoch += 10 + state.current_crosslinks[attestation.data.shard].epoch += 10 pre_state, post_state = run_attestation_processing(state, attestation, False) diff --git a/tests/phase0/block_processing/test_process_proposer_slashing.py b/tests/phase0/block_processing/test_process_proposer_slashing.py index 467d2164b..b5aedc8bb 100644 --- a/tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/tests/phase0/block_processing/test_process_proposer_slashing.py @@ -11,7 +11,7 @@ from tests.phase0.helpers import ( get_valid_proposer_slashing, ) -# mark entire file as 'header' +# mark entire file as 'proposer_slashings' pytestmark = pytest.mark.proposer_slashings diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index 36a087941..809d1239e 100644 --- a/tests/phase0/conftest.py +++ b/tests/phase0/conftest.py @@ -28,7 +28,8 @@ def overwrite_spec_config(config): if field == "LATEST_RANDAO_MIXES_LENGTH": spec.BeaconState.fields['latest_randao_mixes'][1] = config[field] elif field == "SHARD_COUNT": - spec.BeaconState.fields['latest_crosslinks'][1] = config[field] + spec.BeaconState.fields['current_crosslinks'][1] = config[field] + spec.BeaconState.fields['previous_crosslinks'][1] = config[field] elif field == "SLOTS_PER_HISTORICAL_ROOT": spec.BeaconState.fields['latest_block_roots'][1] = config[field] spec.BeaconState.fields['latest_state_roots'][1] = config[field] diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index e5e335d80..083d31b80 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -95,10 +95,6 @@ def create_genesis_state(num_validators, deposit_data_leaves=None): def force_registry_change_at_next_epoch(state): - # artificially trigger registry update at next epoch transition - state.finalized_epoch = get_current_epoch(state) - 1 - for crosslink in state.latest_crosslinks: - crosslink.epoch = state.finalized_epoch state.validator_registry_update_epoch = state.finalized_epoch - 1 @@ -149,6 +145,7 @@ def build_attestation_data(state, slot, shard): else: justified_block_root = state.current_justified_root + crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks return AttestationData( slot=slot, shard=shard, @@ -157,7 +154,7 @@ def build_attestation_data(state, slot, shard): source_root=justified_block_root, target_root=epoch_boundary_root, crosslink_data_root=spec.ZERO_HASH, - previous_crosslink=deepcopy(state.latest_crosslinks[shard]), + previous_crosslink=deepcopy(crosslinks[shard]), ) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 3b4497ca5..a7a6b9961 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -24,6 +24,7 @@ from build.phase0.spec import ( advance_slot, cache_state, set_balance, + slot_to_epoch, verify_merkle_branch, hash, ) @@ -254,6 +255,7 @@ def test_voluntary_exit(state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit 
pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + pre_state.finalized_epoch = slot_to_epoch(pre_state.slot) - 3 # artificially trigger registry update at next epoch transition force_registry_change_at_next_epoch(pre_state) @@ -309,12 +311,12 @@ def test_no_exit_churn_too_long_since_change(state): # # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - # artificially trigger registry update at next epoch transition - force_registry_change_at_next_epoch(pre_state) # make epochs since registry update greater than LATEST_SLASHED_EXIT_LENGTH pre_state.validator_registry_update_epoch = ( get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH ) + # artificially trigger registry update at next epoch transition + force_registry_change_at_next_epoch(pre_state) # set validator to have previously initiated exit pre_state.validator_registry[validator_index].initiated_exit = True From c4321c7cc734ad4fe84e21e9bcd64c3e7fe0f392 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 2 Apr 2019 22:17:55 +0400 Subject: [PATCH 160/481] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 6452a37cd..551205cb3 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -155,7 +155,7 @@ def get_persistent_committee(state: BeaconState, """ Return the persistent committee for the given ``shard`` at the given ``slot``. """ - epoch = slot_to_epoch(epoch) + epoch = slot_to_epoch(slot) earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD From 06ba5fedd7886d57b9794c4c61af33de040f8aa7 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 2 Apr 2019 13:18:41 -0500 Subject: [PATCH 161/481] Add link to custody game to readme (#867) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c5c88daf9..ce0ae8738 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ This repo hosts the current eth2.0 specifications. Discussions about design rati Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. 
The current phases specified are: * [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md) +* [Phase 1 -- Custody game](specs/core/1_custody-game.md) * [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md) Accompanying documents can be found in [specs](specs) and include From 37fc79cb94cc26959f0cb96691583cf4569f08ea Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 2 Apr 2019 22:30:26 +0400 Subject: [PATCH 162/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0330ef1f0..094e7e49d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -83,7 +83,6 @@ - [`bytes_to_int`](#bytes_to_int) - [`get_effective_balance`](#get_effective_balance) - [`get_total_balance`](#get_total_balance) - - [`get_fork_version`](#get_fork_version) - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) - [`verify_bitfield`](#verify_bitfield) @@ -1104,31 +1103,18 @@ def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> G return sum([get_effective_balance(state, i) for i in validators]) ``` -### `get_fork_version` - -```python -def get_fork_version(fork: Fork, - epoch: Epoch) -> bytes: - """ - Return the fork version of the given ``epoch``. - """ - if epoch < fork.epoch: - return fork.previous_version - else: - return fork.current_version -``` - ### `get_domain` ```python def get_domain(state: BeaconState, domain_type: int, - epoch: int=None) -> int: + message_epoch: int=None) -> int: """ - Get the domain number that represents the fork meta and signature domain. + Return the signature domain (fork version concatenated with domain type) of a message. 
""" - epoch_of_message = get_current_epoch(state) if epoch is None else epoch - return bytes_to_int(get_fork_version(fork, epoch_of_message) + int_to_bytes4(domain_type)) + epoch = get_current_epoch(state) if message_epoch is None else message_epoch + fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version + return bytes_to_int(fork_version + int_to_bytes4(domain_type)) ``` ### `get_bitfield_bit` From d8df789a7058383176ed1c4447f15cbaa7d6c042 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 10:13:05 +1100 Subject: [PATCH 163/481] simplify get_winning_root logic --- specs/core/0_beacon-chain.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1032a905a..fa0c97acb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1761,13 +1761,12 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe ```python def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: - all_attestations = state.current_epoch_attestations + state.previous_epoch_attestations + attestations = state.current_epoch_attestations if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_epoch_attestations crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks valid_attestations = [ - a for a in all_attestations - if a.data.previous_crosslink == crosslinks[shard] and - a.data.shard == shard and a.data.slot == slot + a for a in attestations + if a.data.previous_crosslink == crosslinks[shard] and a.data.shard == shard ] all_roots = [a.data.crosslink_data_root for a in valid_attestations] From 014138baab78554e77e18620a9ba450787d800f1 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 11:04:12 +1100 Subject: [PATCH 164/481] pr feedback --- specs/core/0_beacon-chain.md | 11 +++++--- .../test_process_attester_slashing.py | 27 +++++++++++++++---- tests/phase0/test_sanity.py | 8 ++++++ 3 files changed, 37 insertions(+), 9 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ced7a7210..8b3f51c0a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1187,6 +1187,9 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA custody_bit_0_indices = indexed_attestation.custody_bit_0_indices custody_bit_1_indices = indexed_attestation.custody_bit_1_indices + # ensure no duplicate indices across custody bits + assert len(set(custody_bit_0_indices).intersection(set(custody_bit_1_indices))) == 0 + if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1] return False @@ -2291,12 +2294,12 @@ def process_attester_slashing(state: BeaconState, assert verify_indexed_attestation(state, attestation1) assert verify_indexed_attestation(state, attestation2) - validator_indices_1 = attestation1.custody_bit_0_indices + attestation1.custody_bit_1_indices - validator_indices_2 = attestation2.custody_bit_0_indices + attestation2.custody_bit_1_indices + attesting_indices_1 = attestation1.custody_bit_0_indices + attestation1.custody_bit_1_indices + attesting_indices_2 = attestation2.custody_bit_0_indices + attestation2.custody_bit_1_indices slashable_indices = [ - index for index in validator_indices_1 + index for index in attesting_indices_1 if ( - index in validator_indices_2 and + index in attesting_indices_2 
and is_slashable_validator(state.validator_registry[index], get_current_epoch(state)) ) ] diff --git a/tests/phase0/block_processing/test_process_attester_slashing.py b/tests/phase0/block_processing/test_process_attester_slashing.py index fcbc8f78b..06f214c4b 100644 --- a/tests/phase0/block_processing/test_process_attester_slashing.py +++ b/tests/phase0/block_processing/test_process_attester_slashing.py @@ -4,7 +4,7 @@ import pytest import build.phase0.spec as spec from build.phase0.spec import ( get_balance, - get_current_epoch, + get_beacon_proposer_index, process_attester_slashing, ) from tests.phase0.helpers import ( @@ -29,16 +29,22 @@ def run_attester_slashing_processing(state, attester_slashing, valid=True): process_attester_slashing(post_state, attester_slashing) - validator_index = attester_slashing.attestation_1.custody_bit_0_indices[0] - slashed_validator = post_state.validator_registry[validator_index] + slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0] + slashed_validator = post_state.validator_registry[slashed_index] assert not slashed_validator.initiated_exit assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH # lost whistleblower reward assert ( - get_balance(post_state, validator_index) < - get_balance(state, validator_index) + get_balance(post_state, slashed_index) < + get_balance(state, slashed_index) + ) + proposer_index = get_beacon_proposer_index(state, state.slot) + # gained whistleblower reward + assert ( + get_balance(post_state, proposer_index) > + get_balance(state, proposer_index) ) return state, post_state @@ -96,3 +102,14 @@ def test_participants_already_slashed(state): pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False) return pre_state, attester_slashing, post_state + + +def test_custody_bit_0_and_1(state): + attester_slashing = get_valid_attester_slashing(state) + + attester_slashing.attestation_1.custody_bit_1_indices = ( + attester_slashing.attestation_1.custody_bit_0_indices + ) + pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False) + + return pre_state, attester_slashing, post_state diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 0010cb22f..90825242f 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -17,6 +17,7 @@ from build.phase0.spec import ( # functions get_active_validator_indices, get_balance, + get_beacon_proposer_index, get_block_root, get_current_epoch, get_domain, @@ -164,6 +165,13 @@ def test_attester_slashing(state): # lost whistleblower reward assert get_balance(test_state, validator_index) < get_balance(state, validator_index) + proposer_index = get_beacon_proposer_index(test_state, test_state.slot) + # gained whistleblower reward + assert ( + get_balance(test_state, proposer_index) > + get_balance(state, proposer_index) + ) + return state, [block], test_state From dd520c6162a3a9efc72923ea7d89bf9acf338ce3 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 11:22:14 +1100 Subject: [PATCH 165/481] fix tests for get_domain PR --- tests/phase0/helpers.py | 26 ++++++++++++-------------- tests/phase0/test_sanity.py | 6 ++---- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 33f394def..6a7ffd5dd 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -50,7 +50,7 @@ pubkey_to_privkey = 
{pubkey: privkey for privkey, pubkey in zip(privkeys, pubkey def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): if not deposit_data_leaves: deposit_data_leaves = [] - proof_of_possession = b'\x33' * 96 + signature = b'\x33' * 96 deposit_data_list = [] for i in range(num_validators): @@ -60,7 +60,7 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=N # insecurely use pubkey as withdrawal key as well withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], amount=spec.MAX_DEPOSIT_AMOUNT, - proof_of_possession=proof_of_possession, + signature=signature, ) item = hash(deposit_data.serialize()) deposit_data_leaves.append(item) @@ -120,18 +120,17 @@ def build_deposit_data(state, pubkey, privkey, amount): # insecurely use pubkey as withdrawal key as well withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], amount=amount, - proof_of_possession=EMPTY_SIGNATURE, + signature=EMPTY_SIGNATURE, ) - proof_of_possession = bls.sign( + signature = bls.sign( message_hash=signed_root(deposit_data), privkey=privkey, domain=get_domain( - state.fork, - get_current_epoch(state), + state, spec.DOMAIN_DEPOSIT, ) ) - deposit_data.proof_of_possession = proof_of_possession + deposit_data.signature = signature return deposit_data @@ -173,9 +172,9 @@ def build_voluntary_exit(state, epoch, validator_index, privkey): message_hash=signed_root(voluntary_exit), privkey=privkey, domain=get_domain( - fork=state.fork, - epoch=epoch, + state=state, domain_type=spec.DOMAIN_VOLUNTARY_EXIT, + message_epoch=epoch, ) ) @@ -224,9 +223,8 @@ def get_valid_proposer_slashing(state): header_2.slot = slot + 1 domain = get_domain( - fork=state.fork, - epoch=get_current_epoch(state), - domain_type=spec.DOMAIN_BEACON_BLOCK, + state=state, + domain_type=spec.DOMAIN_BEACON_PROPOSER, ) header_1.signature = bls.sign( message_hash=signed_root(header_1), @@ -307,8 +305,8 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) message_hash=message_hash, privkey=privkey, domain=get_domain( - fork=state.fork, - epoch=slot_to_epoch(attestation_data.slot), + state=state, domain_type=spec.DOMAIN_ATTESTATION, + message_epoch=slot_to_epoch(attestation_data.slot), ) ) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 90825242f..b86187ec8 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -303,8 +303,7 @@ def test_voluntary_exit(state): message_hash=signed_root(voluntary_exit), privkey=privkeys[validator_index], domain=get_domain( - fork=pre_state.fork, - epoch=get_current_epoch(pre_state), + state=pre_state, domain_type=spec.DOMAIN_VOLUNTARY_EXIT, ) ) @@ -390,8 +389,7 @@ def test_transfer(state): message_hash=signed_root(transfer), privkey=transfer_privkey, domain=get_domain( - fork=pre_state.fork, - epoch=get_current_epoch(pre_state), + state=pre_state, domain_type=spec.DOMAIN_TRANSFER, ) ) From 80067721e29a8aa718817ab454aa81f9fd83a98e Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 3 Apr 2019 12:29:34 +1100 Subject: [PATCH 166/481] A new more complete test format for ETH 2.0 testing --- specs/test-format.md | 71 --------------- specs/test_formats/README.md | 168 +++++++++++++++++++++++++++++++++++ 2 files changed, 168 insertions(+), 71 deletions(-) delete mode 100644 specs/test-format.md create mode 100644 specs/test_formats/README.md diff --git a/specs/test-format.md b/specs/test-format.md deleted file mode 100644 index d4256ef72..000000000 --- 
a/specs/test-format.md +++ /dev/null @@ -1,71 +0,0 @@ -# General test format [WIP] - -This document defines the general YAML format to which all tests should conform. Testing specifications in Eth2.0 are still a work in progress. _Expect breaking changes_ - -## ToC - -* [About](#about) -* [YAML Fields](#yaml-fields) -* [Example test suite](#example-test-suite) - -## About -Ethereum 2.0 uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform. - -The particular formats of specific types of tests (test suites) are defined in separate documents. - -## YAML fields -`title` _(required)_ - -`summary` _(optional)_ - -`test_suite` _(required)_ string defining the test suite to which the test cases conform - -`fork` _(required)_ production release versioning - -`version` _(required)_ version for particular test document - -`test_cases` _(required)_ list of test cases each of which is formatted to conform to the `test_case` standard defined by `test_suite`. All test cases have optional `name` and `description` string fields. - -## Example test suite -`shuffle` is a test suite that defines test cases for the `shuffle()` helper function defined in the `beacon-chain` spec. - -Test cases that conform to the `shuffle` test suite have the following fields: - -* `input` _(required)_ the list of items passed into `shuffle()` -* `output` _(required)_ the expected list returned by `shuffle()` -* `seed` _(required)_ the seed of entropy passed into `shuffle()` - -As for all test cases, `name` and `description` are optional string fields. - -The following is a sample YAML document for the `shuffle` test suite: - -```yaml -title: Shuffling Algorithm Tests -summary: Test vectors for shuffling a list based upon a seed using `shuffle` -test_suite: shuffle -fork: tchaikovsky -version: 1.0 - -test_cases: -- input: [] - output: [] - seed: !!binary "" -- name: boring_list - description: List with a single element, 0 - input: [0] - output: [0] - seed: !!binary "" -- input: [255] - output: [255] - seed: !!binary "" -- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [1, 6, 4, 1, 6, 6, 2, 2, 4, 5] - seed: !!binary "" -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [4, 7, 10, 13, 3, 1, 2, 9, 12, 6, 11, 8, 5] - seed: !!binary "" -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 65, 2, 5, 4, 2, 6, 6, 1, 1] - seed: !!binary | - JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= -``` diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md new file mode 100644 index 000000000..fec018b4a --- /dev/null +++ b/specs/test_formats/README.md @@ -0,0 +1,168 @@ +# General test format + +This document defines the YAML format and structure used for ETH 2.0 testing. + +## ToC + +* [About](#about) +* [Glossary](#glossary) +* [Test format philosophy](#test-format-philosophy) +* [Test Suite](#yaml-suite) +* [Config](#config) +* [Fork-timeline](#fork-timeline) +* [Config sourcing](#config-sourcing) +* [Test structure](#test-structure) + +## About + +Ethereum 2.0 uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform. + +The particular formats of specific types of tests (test suites) are defined in separate documents. + +## Glossary + +- `generator`: a program that outputs one or more `suite` files. +- `type`: the specialization of one single `generator`. 
+- `suite`: a YAML file with: + - a header: describes the `suite`, and defines what the `suite` is for + - a list of test cases +- `runner`: where a generator is a "producer", this is the 1-to-1 "consumer". A `runner` focuses on one `type`. +- `handler`: a `runner` may be too limited sometimes, you may have a `suite` with a specific focus that requires a different format. + To facilitate this, you specify a `handler`: the runner can deal with the format by using the specified handler. + Using a `handler` in a `runner` is optional. +- `case`: a test case, an entry in the `test_cases` list of a `suite`. A case can be anything in general, + but its format should be well-defined in the documentation corresponding to the `type` (and `handler`).\ + A test has the same exact configuration and fork context as the other entries in the `case` list of its `suite`. +- `forks_timeline`: a fork timeline definition, a YAML file containing a key for each fork-name, and a slot number as value. + +## Test format philosophy + +### Config design + +After long discussion, the following types of configured constants were identified: +- Never changing: genesis data +- Changing, but reliant on old value: e.g. a slot time may change, but if you want to do the conversion + `(genesis data, timestamp) -> slot number` you end up needing both constants. +- Changing, but kept around during fork transition: finalization may take a while, + e.g. an executable has to deal with new deposits and old deposits at the same time. Another example may be economic constants. +- Additional, back-wards compatible: new constants are introduced for later phases +- Changing: there is a very small chance some constant may really be *replaced*. + In this off-chance, it is likely better to include it as an additional variable, + and some clients may simply stop supporting the old one, if they do not want to sync from genesis. + +Based on these types of changes, we model the config as a list of key value pairs, + that only grows with every fork (they may change in development versions of forks however, git manages this). +With this approach, configurations are backwards compatible (older clients ignore unknown variables), and easy to maintain. + +### Fork config design + +There are two types of fork-data: +1) timeline: when does a fork take place? +2) coverage: what forks are covered by a test? + +The first is neat to have as a separate form: we prevent duplication, and can run with different presets + (e.g. fork timeline for a minimal local test, for a public testnet, or for main-net) + +The second is still somewhat ambiguous: some tests may want cover multiple forks, and can do so in different ways: +- run one test, transitioning from one to the other +- run the same test for both +- run a test for every transition from one fork to the other +- more + +There is a common factor here however: the options are exclusive, and give a clear idea on what test suites need to be ran to cover testing for a specific fork. +The way this list of forks is interpreted, is up to the test-runner: +State-transition test suites may want to just declare forks that are being covered in the test suite, + whereas shuffling test suites may want to declare a list of forks to test the shuffling algorithm for individually. + +### Test completeness + +We want tests to be independent from any sync-data. If one wants to run a test, the input data should be available from the YAML. 
+The aim is to provide clients with a well-defined scope of work to run a particular set of test-suites. + +- Clients that are complete are expected to contribute to testing, seeking for better resources to get conformance with the spec, and other clients. +- Clients that are not complete in functionality can choose to ignore suites that use certain test-runners, or specific handlers of these test-runners. +- Clients that are on older versions can test there work based on older releases of the generated tests, and catch up with newer releases when possible. + +## Test Suite + +``` +title: -- Display name for the test suite +summary: -- Summarizes the test suite +forks_timeline: -- Used to determine the forking timeline +forks: -- Runner decides what to do: run for each fork, or run for all at once, each fork transition, etc. + - ... +config: -- Used to determine which set of constants to run (possibly compile time) with +runner: *MUST be consistent with folder structure* +handler: *MUST be consistent with folder structure* + +test_cases: + ... + +``` + +## Config + +A configuration is a separate YAML file. +Separation of configuration and tests aims to: +- prevent duplication of a minimal set of tests +- make all tests easy to upgrade when a new config constant is introduced. +- clearly define which constants to use +- share-able between clients, for cross-client short or long lived test-nets +- minimize the amounts of different constants permutations to compile as a client. \** + +\**: Some clients prefer compile-time constants and optimizations. +They should compile for each configuration once, and run the corresponding tests per build target. + + +## Fork-timeline + +A fork timeline is (preferably) loaded in as a configuration object into a client, as opposed to the constants configuration: + - we do not allocate or optimize any code based on slot numbers + - when we transition from one fork to the other, it is preferred to stay online. + - we may decide on a slot number for a fork based on external events (e.g. Eth1 log event), + a client should be able to activate a fork dynamically. + +Note that phases are considered to be "super forks", + i.e. they will just have a fork name, and be more heavy on changes. + +## Config sourcing + +The constants configurations are located in: + +``` +/configs/contants/.yaml +``` + +And copied by CI for testing purposes to: + +``` +/configs/contants/.yaml +``` + + +The fork timelines are located in: + +``` +/configs/fork_timelines/.yaml +``` + +And copied by CI for testing purposes to: + +``` +/configs/fork_timelines/.yaml +``` + +## Test structure + +To prevent parsing of hundreds of different YAML files to test a specific test type, + or even more specific, just a handler, tests should be structured in the following nested form: + +``` +. <--- root of eth2.0 tests repository +├── bls <--- collection of handler for a specific test-runner, example runner: "bls" +│   ├── signing <--- collection of test suites for a specific handler, example handler: "signing". If no handler, use a dummy folder "main" +│   │   ├── sign_msg.yml <--- an entry list of test suites +│   │   ... <--- more suite files (optional) +│   ... <--- more handlers +... 
<--- more test types +``` From 96ab5a334d851b40581746673fa1faa43e4d6081 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 3 Apr 2019 13:35:40 +1100 Subject: [PATCH 167/481] Add note on configuration of constants --- specs/core/0_beacon-chain.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d08828692..0beb6ca9f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -178,6 +178,9 @@ Code snippets appearing in `this style` are to be interpreted as Python code. ## Constants +Note: the default main-net values for the constants are included here for illustrative purposes. +The different configurations for main-net, test-nets, and yaml-based testing can be found in the `configs/contants/` directory. + ### Misc | Name | Value | From f8cdd66ffcd81275d0cd3273b3e98924e0a93169 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 3 Apr 2019 13:50:56 +1100 Subject: [PATCH 168/481] minor fixes --- README.md | 1 + test_generators/bls/README.md | 3 ++- test_generators/bls/main.py | 6 +----- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 294dd439d..6ea9f1c14 100644 --- a/README.md +++ b/README.md @@ -35,4 +35,5 @@ The following are the broad design goals for Ethereum 2.0: Documentation on the different components used during spec writing can be found here: * [YAML Test Generators](test_generators/README.md) * [Executable Python Spec](test_libs/pyspec/README.md) +* [Py-tests](py_tests/README.md) diff --git a/test_generators/bls/README.md b/test_generators/bls/README.md index 9ce1b2f6c..a21ad16d9 100644 --- a/test_generators/bls/README.md +++ b/test_generators/bls/README.md @@ -17,4 +17,5 @@ The base unit is bytes48 of which only 381 bits are used ## Comments -Compared to Zcash, Ethereum specs always requires the compressed form (c_flag / most significant bit always set). \ No newline at end of file +Compared to Zcash, Ethereum specs always requires the compressed form (c_flag / most significant bit always set). +Also note that pubkeys and privkeys are reversed. diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py index 3c2ab454b..4c19c6249 100644 --- a/test_generators/bls/main.py +++ b/test_generators/bls/main.py @@ -1,7 +1,5 @@ """ BLS test vectors generator -Usage: - "python tgen_bls path/to/output.yml" """ # Standard library @@ -10,13 +8,11 @@ from typing import Tuple # Third-party import yaml +from py_ecc import bls # Ethereum from eth_utils import int_to_big_endian, big_endian_to_int -# Local imports -from py_ecc import bls - def int_to_hex(n: int) -> str: return '0x' + int_to_big_endian(n).hex() From 54eba8cbbe12298f8b83af90e5c8a9265fd3d1fe Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 14:12:48 +1100 Subject: [PATCH 169/481] Update specs/test_formats/README.md Co-Authored-By: protolambda --- specs/test_formats/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index fec018b4a..d0cb7db72 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -76,7 +76,7 @@ State-transition test suites may want to just declare forks that are being cover ### Test completeness -We want tests to be independent from any sync-data. If one wants to run a test, the input data should be available from the YAML. +Tests should be independent of any sync-data. If one wants to run a test, the input data should be available from the YAML. 
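As a companion to the nested `<runner>/<handler>/<suite>.yml` layout shown in the test-structure section above, a minimal sketch (not part of any patch here) of how a consumer might enumerate suite files; the function name and the `tests_root` argument are illustrative:

```python
import os


def collect_suite_paths(tests_root: str):
    # Yield (runner, handler, path) for every suite file in the nested layout,
    # e.g. ('bls', 'signing', '.../bls/signing/sign_msg.yml').
    for runner in sorted(os.listdir(tests_root)):
        runner_dir = os.path.join(tests_root, runner)
        if not os.path.isdir(runner_dir):
            continue
        for handler in sorted(os.listdir(runner_dir)):
            handler_dir = os.path.join(runner_dir, handler)
            if not os.path.isdir(handler_dir):
                continue
            for name in sorted(os.listdir(handler_dir)):
                if name.endswith(('.yml', '.yaml')):
                    yield runner, handler, os.path.join(handler_dir, name)
```

A client that does not implement a given runner or handler can then skip whole directories rather than individual files, matching the well-defined scope of work described above.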
The aim is to provide clients with a well-defined scope of work to run a particular set of test-suites. - Clients that are complete are expected to contribute to testing, seeking for better resources to get conformance with the spec, and other clients. From 04b9ce8eac72665ee51ed95927c4b2204a1a33aa Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 14:13:05 +1100 Subject: [PATCH 170/481] Update specs/test_formats/README.md Co-Authored-By: protolambda --- specs/test_formats/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index d0cb7db72..061903799 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -130,7 +130,7 @@ Note that phases are considered to be "super forks", The constants configurations are located in: ``` -/configs/contants/.yaml +/configs/constants/.yaml ``` And copied by CI for testing purposes to: From 5790af7fc422b676c3d5e0289e0c1c565bbff285 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 14:13:17 +1100 Subject: [PATCH 171/481] Update specs/test_formats/README.md Co-Authored-By: protolambda --- specs/test_formats/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 061903799..14b358e67 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -113,7 +113,6 @@ Separation of configuration and tests aims to: \**: Some clients prefer compile-time constants and optimizations. They should compile for each configuration once, and run the corresponding tests per build target. - ## Fork-timeline A fork timeline is (preferably) loaded in as a configuration object into a client, as opposed to the constants configuration: From 55d21c1563ad9477dc9390e6f831492353a5e09e Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 14:13:29 +1100 Subject: [PATCH 172/481] Update specs/test_formats/README.md Co-Authored-By: protolambda --- specs/test_formats/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 14b358e67..85f8e7e07 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -135,7 +135,7 @@ The constants configurations are located in: And copied by CI for testing purposes to: ``` -/configs/contants/.yaml +/configs/constants/.yaml ``` From 9e010da1187b982f84f102a0a198e2f199ef3775 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 3 Apr 2019 14:18:17 +1100 Subject: [PATCH 173/481] rename pyspec pkg to eth2spec --- .gitignore | 2 +- Makefile | 4 ++-- README.md | 2 +- .../block_processing/test_process_attestation.py | 6 +++--- .../block_processing/test_process_attester_slashing.py | 4 ++-- .../block_processing/test_process_block_header.py | 2 +- .../phase0/block_processing/test_process_deposit.py | 4 ++-- .../block_processing/test_process_proposer_slashing.py | 4 ++-- .../phase0/block_processing/test_voluntary_exit.py | 4 ++-- py_tests/phase0/conftest.py | 2 +- py_tests/phase0/helpers.py | 8 ++++---- py_tests/phase0/test_sanity.py | 10 +++++----- scripts/phase0/build_spec.py | 4 ++-- test_generators/README.md | 2 +- test_libs/pyspec/README.md | 4 ++-- test_libs/pyspec/{pyspec => eth2spec}/__init__.py | 0 .../pyspec/{pyspec => eth2spec}/debug/__init__.py | 0 test_libs/pyspec/{pyspec => eth2spec}/debug/jsonize.py | 2 +- .../pyspec/{pyspec => eth2spec}/phase0/__init__.py | 0 .../{pyspec => eth2spec}/phase0/state_transition.py | 0 
.../pyspec/{pyspec => eth2spec}/utils/__init__.py | 0 .../pyspec/{pyspec => eth2spec}/utils/bls_stub.py | 0 .../pyspec/{pyspec => eth2spec}/utils/hash_function.py | 0 .../{pyspec => eth2spec}/utils/merkle_minimal.py | 0 .../pyspec/{pyspec => eth2spec}/utils/minimal_ssz.py | 0 25 files changed, 32 insertions(+), 32 deletions(-) rename test_libs/pyspec/{pyspec => eth2spec}/__init__.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/debug/__init__.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/debug/jsonize.py (97%) rename test_libs/pyspec/{pyspec => eth2spec}/phase0/__init__.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/phase0/state_transition.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/utils/__init__.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/utils/bls_stub.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/utils/hash_function.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/utils/merkle_minimal.py (100%) rename test_libs/pyspec/{pyspec => eth2spec}/utils/minimal_ssz.py (100%) diff --git a/.gitignore b/.gitignore index 909996e73..ce047240a 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,4 @@ yaml_tests/ .pytest_cache # Dynamically built from Markdown spec -test_libs/pyspec/pyspec/phase0/spec.py +test_libs/pyspec/eth2spec/phase0/spec.py diff --git a/Makefile b/Makefile index 34d347118..bf23a1442 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/))) YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS)) GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS)) -PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/pyspec/phase0/spec.py +PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) @@ -40,7 +40,7 @@ pyspec: $(PY_SPEC_ALL_TARGETS) phase0: $(PY_SPEC_PHASE_0_TARGETS) -$(PY_SPEC_DIR)/pyspec/phase0/spec.py: +$(PY_SPEC_DIR)/eth2spec/phase0/spec.py: python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@ diff --git a/README.md b/README.md index d68c55cf3..e575aebd6 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,6 @@ The following are the broad design goals for Ethereum 2.0: Documentation on the different components used during spec writing can be found here: * [YAML Test Generators](test_generators/README.md) -* [Executable Python Spec](test_libs/pyspec/README.md) +* [Executable Python Spec](test_libs/eth2spec/README.md) * [Py-tests](py_tests/README.md) diff --git a/py_tests/phase0/block_processing/test_process_attestation.py b/py_tests/phase0/block_processing/test_process_attestation.py index d454d6be4..bd4c7da12 100644 --- a/py_tests/phase0/block_processing/test_process_attestation.py +++ b/py_tests/phase0/block_processing/test_process_attestation.py @@ -1,12 +1,12 @@ from copy import deepcopy import pytest -import pyspec.phase0.spec as spec +import eth2spec.phase0.spec as spec -from pyspec.phase0.state_transition import ( +from eth2spec.phase0.state_transition import ( state_transition, ) -from pyspec.phase0.spec import ( +from eth2spec.phase0.spec import ( get_current_epoch, process_attestation, slot_to_epoch, diff --git a/py_tests/phase0/block_processing/test_process_attester_slashing.py b/py_tests/phase0/block_processing/test_process_attester_slashing.py index b1f836a29..5c97eb97b 100644 --- a/py_tests/phase0/block_processing/test_process_attester_slashing.py +++ b/py_tests/phase0/block_processing/test_process_attester_slashing.py 
@@ -1,8 +1,8 @@ from copy import deepcopy import pytest -import pyspec.phase0.spec as spec -from pyspec.phase0.spec import ( +import eth2spec.phase0.spec as spec +from eth2spec.phase0.spec import ( get_balance, get_beacon_proposer_index, process_attester_slashing, diff --git a/py_tests/phase0/block_processing/test_process_block_header.py b/py_tests/phase0/block_processing/test_process_block_header.py index 6c40260d5..0ffc6f613 100644 --- a/py_tests/phase0/block_processing/test_process_block_header.py +++ b/py_tests/phase0/block_processing/test_process_block_header.py @@ -2,7 +2,7 @@ from copy import deepcopy import pytest -from pyspec.phase0.spec import ( +from eth2spec.phase0.spec import ( get_beacon_proposer_index, cache_state, advance_slot, diff --git a/py_tests/phase0/block_processing/test_process_deposit.py b/py_tests/phase0/block_processing/test_process_deposit.py index cf911e29a..a424e2846 100644 --- a/py_tests/phase0/block_processing/test_process_deposit.py +++ b/py_tests/phase0/block_processing/test_process_deposit.py @@ -1,9 +1,9 @@ from copy import deepcopy import pytest -import pyspec.phase0.spec as spec +import eth2spec.phase0.spec as spec -from pyspec.phase0.spec import ( +from eth2spec.phase0.spec import ( get_balance, ZERO_HASH, process_deposit, diff --git a/py_tests/phase0/block_processing/test_process_proposer_slashing.py b/py_tests/phase0/block_processing/test_process_proposer_slashing.py index 3c3208b87..51e56bf70 100644 --- a/py_tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/py_tests/phase0/block_processing/test_process_proposer_slashing.py @@ -1,8 +1,8 @@ from copy import deepcopy import pytest -import pyspec.phase0.spec as spec -from pyspec.phase0.spec import ( +import eth2spec.phase0.spec as spec +from eth2spec.phase0.spec import ( get_balance, get_current_epoch, process_proposer_slashing, diff --git a/py_tests/phase0/block_processing/test_voluntary_exit.py b/py_tests/phase0/block_processing/test_voluntary_exit.py index e7457126d..51b44b5dc 100644 --- a/py_tests/phase0/block_processing/test_voluntary_exit.py +++ b/py_tests/phase0/block_processing/test_voluntary_exit.py @@ -1,9 +1,9 @@ from copy import deepcopy import pytest -import pyspec.phase0.spec as spec +import eth2spec.phase0.spec as spec -from pyspec.phase0.spec import ( +from eth2spec.phase0.spec import ( get_active_validator_indices, get_current_epoch, process_voluntary_exit, diff --git a/py_tests/phase0/conftest.py b/py_tests/phase0/conftest.py index fb866160a..ec499862c 100644 --- a/py_tests/phase0/conftest.py +++ b/py_tests/phase0/conftest.py @@ -1,6 +1,6 @@ import pytest -from pyspec.phase0 import spec +from eth2spec.phase0 import spec from .helpers import ( create_genesis_state, diff --git a/py_tests/phase0/helpers.py b/py_tests/phase0/helpers.py index 9ca05ba97..8ef6b5329 100644 --- a/py_tests/phase0/helpers.py +++ b/py_tests/phase0/helpers.py @@ -2,9 +2,9 @@ from copy import deepcopy from py_ecc import bls -import pyspec.phase0.spec as spec -from pyspec.utils.minimal_ssz import signed_root -from pyspec.phase0.spec import ( +import eth2spec.phase0.spec as spec +from eth2spec.utils.minimal_ssz import signed_root +from eth2spec.phase0.spec import ( # constants EMPTY_SIGNATURE, ZERO_HASH, @@ -35,7 +35,7 @@ from pyspec.phase0.spec import ( verify_merkle_branch, hash, ) -from pyspec.utils.merkle_minimal import ( +from eth2spec.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, diff --git a/py_tests/phase0/test_sanity.py 
b/py_tests/phase0/test_sanity.py index 3ca390547..8c30b62f0 100644 --- a/py_tests/phase0/test_sanity.py +++ b/py_tests/phase0/test_sanity.py @@ -3,10 +3,10 @@ from copy import deepcopy import pytest from py_ecc import bls -import pyspec.phase0.spec as spec +import eth2spec.phase0.spec as spec -from pyspec.utils.minimal_ssz import signed_root -from pyspec.phase0.spec import ( +from eth2spec.utils.minimal_ssz import signed_root +from eth2spec.phase0.spec import ( # constants EMPTY_SIGNATURE, ZERO_HASH, @@ -28,10 +28,10 @@ from pyspec.phase0.spec import ( verify_merkle_branch, hash, ) -from pyspec.phase0.state_transition import ( +from eth2spec.phase0.state_transition import ( state_transition, ) -from pyspec.utils.merkle_minimal import ( +from eth2spec.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index 8ec57d56d..fa7d1fb68 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -13,8 +13,8 @@ from typing import ( NewType, Tuple, ) -from pyspec.utils.minimal_ssz import * -from pyspec.utils.bls_stub import * +from eth2spec.utils.minimal_ssz import * +from eth2spec.utils.bls_stub import * """) diff --git a/test_generators/README.md b/test_generators/README.md index 51cca6561..bacb7229a 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -113,7 +113,7 @@ if __name__ == "__main__": And to use the pyspec: ``` -from pyspec.phase0 import spec +from eth2spec.phase0 import spec ``` Recommendations: diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index 11ffa835a..08042e746 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -13,13 +13,13 @@ All the dynamic parts of the spec can be build at once with `make pyspec`. Alternatively, you can build a sub-set of the pyspec: `make phase0`. -Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/pyspec/phase0/spec.py` +Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2spec/phase0/spec.py` ## Contributing Contributions are welcome, but consider implementing your idea as part of the spec itself first. The pyspec is not a replacement. -If you see opportunity to include any of the `pyspec/utils/` code in the spec, +If you see opportunity to include any of the `pyspec/eth2spec/utils/` code in the spec, please submit an issue or PR. 
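For reference, a sketch (not part of the patch) of what the single-file build boils down to after the rename, based on the Makefile rule above; the repository-relative paths are illustrative, and the import assumes the `eth2spec` package is on the Python path:

```python
import subprocess

# Roughly equivalent to `make test_libs/pyspec/eth2spec/phase0/spec.py`:
# build_spec.py reads the markdown spec and writes the executable Python spec.
subprocess.run(
    [
        'python3', 'scripts/phase0/build_spec.py',
        'specs/core/0_beacon-chain.md',
        'test_libs/pyspec/eth2spec/phase0/spec.py',
    ],
    check=True,
)

from eth2spec.phase0 import spec  # noqa: E402 - import after the generated file exists

print(spec.SLOTS_PER_EPOCH)  # constants are plain module attributes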
## License diff --git a/test_libs/pyspec/pyspec/__init__.py b/test_libs/pyspec/eth2spec/__init__.py similarity index 100% rename from test_libs/pyspec/pyspec/__init__.py rename to test_libs/pyspec/eth2spec/__init__.py diff --git a/test_libs/pyspec/pyspec/debug/__init__.py b/test_libs/pyspec/eth2spec/debug/__init__.py similarity index 100% rename from test_libs/pyspec/pyspec/debug/__init__.py rename to test_libs/pyspec/eth2spec/debug/__init__.py diff --git a/test_libs/pyspec/pyspec/debug/jsonize.py b/test_libs/pyspec/eth2spec/debug/jsonize.py similarity index 97% rename from test_libs/pyspec/pyspec/debug/jsonize.py rename to test_libs/pyspec/eth2spec/debug/jsonize.py index a77684543..3ea6fe3f5 100644 --- a/test_libs/pyspec/pyspec/debug/jsonize.py +++ b/test_libs/pyspec/eth2spec/debug/jsonize.py @@ -1,4 +1,4 @@ -from pyspec.utils.minimal_ssz import hash_tree_root +from eth2spec.utils.minimal_ssz import hash_tree_root def jsonize(value, typ, include_hash_tree_roots=False): diff --git a/test_libs/pyspec/pyspec/phase0/__init__.py b/test_libs/pyspec/eth2spec/phase0/__init__.py similarity index 100% rename from test_libs/pyspec/pyspec/phase0/__init__.py rename to test_libs/pyspec/eth2spec/phase0/__init__.py diff --git a/test_libs/pyspec/pyspec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py similarity index 100% rename from test_libs/pyspec/pyspec/phase0/state_transition.py rename to test_libs/pyspec/eth2spec/phase0/state_transition.py diff --git a/test_libs/pyspec/pyspec/utils/__init__.py b/test_libs/pyspec/eth2spec/utils/__init__.py similarity index 100% rename from test_libs/pyspec/pyspec/utils/__init__.py rename to test_libs/pyspec/eth2spec/utils/__init__.py diff --git a/test_libs/pyspec/pyspec/utils/bls_stub.py b/test_libs/pyspec/eth2spec/utils/bls_stub.py similarity index 100% rename from test_libs/pyspec/pyspec/utils/bls_stub.py rename to test_libs/pyspec/eth2spec/utils/bls_stub.py diff --git a/test_libs/pyspec/pyspec/utils/hash_function.py b/test_libs/pyspec/eth2spec/utils/hash_function.py similarity index 100% rename from test_libs/pyspec/pyspec/utils/hash_function.py rename to test_libs/pyspec/eth2spec/utils/hash_function.py diff --git a/test_libs/pyspec/pyspec/utils/merkle_minimal.py b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py similarity index 100% rename from test_libs/pyspec/pyspec/utils/merkle_minimal.py rename to test_libs/pyspec/eth2spec/utils/merkle_minimal.py diff --git a/test_libs/pyspec/pyspec/utils/minimal_ssz.py b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py similarity index 100% rename from test_libs/pyspec/pyspec/utils/minimal_ssz.py rename to test_libs/pyspec/eth2spec/utils/minimal_ssz.py From 9fe9a000f538697faea8c840d5418adff7bdd676 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 3 Apr 2019 14:37:34 +1100 Subject: [PATCH 174/481] more explicit about relations between generator, runner, type, handler --- specs/test_formats/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 85f8e7e07..ad6424d8d 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -22,11 +22,14 @@ The particular formats of specific types of tests (test suites) are defined in s ## Glossary - `generator`: a program that outputs one or more `suite` files. + - A generator should only output one `type` of test. + - A generator is free to output multiple `suite` files, optionally with different `handler`s. 
- `type`: the specialization of one single `generator`. - `suite`: a YAML file with: - a header: describes the `suite`, and defines what the `suite` is for - a list of test cases -- `runner`: where a generator is a "producer", this is the 1-to-1 "consumer". A `runner` focuses on one `type`. +- `runner`: where a generator is a *"producer"*, this is the *"consumer"**. + - A `runner` focuses on *only one* `type`, and each type has *only one* `runner`. - `handler`: a `runner` may be too limited sometimes, you may have a `suite` with a specific focus that requires a different format. To facilitate this, you specify a `handler`: the runner can deal with the format by using the specified handler. Using a `handler` in a `runner` is optional. From 1fa88fb6d1363a10512e179cb460cd6026d0c3c7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 3 Apr 2019 16:56:13 +1100 Subject: [PATCH 175/481] remove previous crosslink check from process_crosslinks --- specs/core/0_beacon-chain.md | 7 +- .../test_process_crosslinks.py | 92 +++++++++++++++++++ tests/phase0/helpers.py | 39 +++++++- 3 files changed, 131 insertions(+), 7 deletions(-) create mode 100644 tests/phase0/epoch_processing/test_process_crosslinks.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f95cb29e8..f3f66ce41 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1041,7 +1041,7 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: ```python def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: + attestation_data: AttestationData) -> List[ValidatorIndex]: # Find the committee in the list with the desired shard crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) @@ -1766,10 +1766,7 @@ def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Sha attestations = state.current_epoch_attestations if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_epoch_attestations crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks - valid_attestations = [ - a for a in attestations - if a.data.previous_crosslink == crosslinks[shard] and a.data.shard == shard - ] + valid_attestations = [a for a in attestations if a.data.shard == shard] all_roots = [a.data.crosslink_data_root for a in valid_attestations] # handle when no attestations for shard available diff --git a/tests/phase0/epoch_processing/test_process_crosslinks.py b/tests/phase0/epoch_processing/test_process_crosslinks.py new file mode 100644 index 000000000..a69950f21 --- /dev/null +++ b/tests/phase0/epoch_processing/test_process_crosslinks.py @@ -0,0 +1,92 @@ +from copy import deepcopy +import pytest + +import build.phase0.spec as spec + +from build.phase0.state_transition import ( + state_transition, +) +from build.phase0.spec import ( + ZERO_HASH, + cache_state, + get_crosslink_committee_for_attestation, + get_current_epoch, + process_crosslinks, + slot_to_epoch, +) +from tests.phase0.helpers import ( + add_attestation_to_state, + build_empty_block_for_next_slot, + fill_aggregate_attestation, + get_valid_attestation, + next_epoch, + set_bitfield_bit, +) + + +# mark entire file as 'crosslinks' +pytestmark = pytest.mark.crosslinks + + +def run_process_crosslinks(state, valid=True): + post_state = deepcopy(state) + + # transition state to slot before state transition + slot = state.slot + (spec.SLOTS_PER_EPOCH 
- state.slot % spec.SLOTS_PER_EPOCH) - 1 + block = build_empty_block_for_next_slot(state) + block.slot = slot + state_transition(state, block) + + # cache state before epoch transition + cache_state(state) + + process_crosslinks(post_state) + + return state, post_state + + +def test_no_attestations(state): + pre_state, post_state = run_process_crosslinks(state) + + for shard in range(spec.SHARD_COUNT): + assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard] + + return pre_state, post_state + + +def test_single_crosslink_update_from_current_epoch(state): + next_epoch(state) + + attestation = get_valid_attestation(state) + + fill_aggregate_attestation(state, attestation) + add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) + + assert len(state.current_epoch_attestations) == 1 + + pre_state, post_state = run_process_crosslinks(state) + + shard = attestation.data.shard + assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard] + assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] + + return pre_state, post_state + + +def test_single_crosslink_update_from_previous_epoch(state): + next_epoch(state) + + attestation = get_valid_attestation(state) + + fill_aggregate_attestation(state, attestation) + add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH) + + assert len(state.previous_epoch_attestations) == 1 + + pre_state, post_state = run_process_crosslinks(state) + + shard = attestation.data.shard + assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard] + assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] + + return pre_state, post_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 0fd7bab73..ead0a0489 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -4,6 +4,9 @@ from py_ecc import bls import build.phase0.spec as spec from build.phase0.utils.minimal_ssz import signed_root +from build.phase0.state_transition import ( + state_transition, +) from build.phase0.spec import ( # constants EMPTY_SIGNATURE, @@ -47,6 +50,19 @@ pubkeys = [bls.privtopub(privkey) for privkey in privkeys] pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} +def set_bitfield_bit(bitfield, i): + """ + Set the bit in ``bitfield`` at position ``i`` to ``1``. 
+ """ + byte_index = i // 8 + bit_index = i % 8 + return ( + bitfield[:byte_index] + + bytes([bitfield[byte_index] | (1 << bit_index)]) + + bitfield[byte_index+1:] + ) + + def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): if not deposit_data_leaves: deposit_data_leaves = [] @@ -140,7 +156,7 @@ def build_attestation_data(state, slot, shard): if epoch_start_slot == slot: epoch_boundary_root = block_root else: - get_block_root(state, epoch_start_slot) + epoch_boundary_root = get_block_root(state, epoch_start_slot) if slot < epoch_start_slot: justified_block_root = state.previous_justified_root @@ -257,7 +273,7 @@ def get_valid_attester_slashing(state): def get_valid_attestation(state, slot=None): if slot is None: slot = state.slot - shard = state.latest_start_shard + shard = state.latest_start_shard + slot % spec.SLOTS_PER_EPOCH attestation_data = build_attestation_data(state, slot, shard) crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) @@ -309,3 +325,22 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) domain_type=spec.DOMAIN_ATTESTATION, ) ) + + +def fill_aggregate_attestation(state, attestation): + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data) + for i in range(len(crosslink_committee)): + attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i) + + +def add_attestation_to_state(state, attestation, slot): + block = build_empty_block_for_next_slot(state) + block.slot = slot + block.body.attestations.append(attestation) + state_transition(state, block) + + +def next_epoch(state): + block = build_empty_block_for_next_slot(state) + block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + state_transition(state, block) From bee740e834a04509c24a741d8f2e5abaaeb28e27 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 3 Apr 2019 01:06:18 -0500 Subject: [PATCH 176/481] Removed merkle partial from paths for now --- specs/light_client/merkle_proofs.md | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 2e92488cb..285445ca8 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -170,20 +170,6 @@ We define: } ``` -#### `merkle_partial_from_paths` - -```python -def merkle_partial_from_paths(obj, paths): - indices = set() - for path in paths: - indices = indices.union(get_generalized_indices(obj, path)) - return MerklePartial( - root=hash_tree_root(obj), - indices=indices, - values= mk_multi_proof - ) -``` - #### Proofs for execution We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `MerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`. 
From 2529cb1d74f8a73f0d327480d9ebc3c052d639f9 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 3 Apr 2019 22:53:41 +0400 Subject: [PATCH 177/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e5102c56d..bdce36d9b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -589,7 +589,6 @@ The types are defined topologically to aid in facilitating an executable version # Validator registry 'validator_registry': [Validator], 'balances': ['uint64'], - 'validator_registry_update_epoch': 'uint64', # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], @@ -1525,7 +1524,6 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Validator registry validator_registry=[], balances=[], - validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), @@ -2022,7 +2020,11 @@ Run the following function: def update_registry(state: BeaconState) -> None: current_epoch = get_current_epoch(state) # Check if we should update, and if so, update - if state.finalized_epoch > state.validator_registry_update_epoch: + + activations_since_finalization = len([index in state.validator_registry if + state.validator_registry[index].activation_epoch > state.finalized_epoch + ACTIVATION_EXIT_DELAY + ]) + if MAX_EXIT_DEQUEUES_PER_EPOCH > activations_since_finalization: # Validator indices that could be activated indices_for_activation = sorted( filter( @@ -2031,10 +2033,9 @@ def update_registry(state: BeaconState) -> None: ), key=lambda index: state.validator_registry[index].activation_eligibility_epoch ) - for index in indices_for_activation[:MAX_EXIT_DEQUEUES_PER_EPOCH]: + for index in indices_for_activation[:MAX_EXIT_DEQUEUES_PER_EPOCH - activations_since_finalization]: activate_validator(state, index, is_genesis=False) - state.validator_registry_update_epoch = current_epoch state.latest_start_shard = ( state.latest_start_shard + get_current_epoch_committee_count(state) From 76893cafaadb2610fdc171f07fe2cba58bdca132 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 3 Apr 2019 23:22:27 +0400 Subject: [PATCH 178/481] Missing typehint for `convert_to_indexed` And some minor cleanups --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 70c5b67f6..3a2bb3903 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1160,9 +1160,9 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: ### `convert_to_indexed` ```python -def convert_to_indexed(state: BeaconState, attestation: Attestation): +def convert_to_indexed(state: BeaconState, attestation: Attestation) -> IndexedAttestation: """ - Convert an attestation to (almost) indexed-verifiable form + Convert an ``attestation`` to (almost) indexed-verifiable form. 
""" attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) @@ -1172,7 +1172,7 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation): custody_bit_0_indices=custody_bit_0_indices, custody_bit_1_indices=custody_bit_1_indices, data=attestation.data, - aggregate_signature=attestation.aggregate_signature + aggregate_signature=attestation.aggregate_signature, ) ``` From 6ca3c64526a1683aa72866d6b6f7a210cbea399f Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 3 Apr 2019 23:24:46 +0400 Subject: [PATCH 179/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3a2bb3903..9bec7841f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1162,7 +1162,7 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: ```python def convert_to_indexed(state: BeaconState, attestation: Attestation) -> IndexedAttestation: """ - Convert an ``attestation`` to (almost) indexed-verifiable form. + Convert ``attestation`` to (almost) indexed-verifiable form. """ attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) From d1af9144bc0ae464e5ceca4b9dd19993a5ab7297 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 3 Apr 2019 23:40:54 +0400 Subject: [PATCH 180/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f3f66ce41..65e48c69c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -354,7 +354,7 @@ The types are defined topologically to aid in facilitating an executable version # Crosslink vote 'shard': 'uint64', - 'previous_crosslink': Crosslink, + 'source_crosslink': Crosslink, 'crosslink_data_root': 'bytes32', } ``` @@ -2325,27 +2325,26 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: Process ``Attestation`` operation. Note that this function mutates ``state``. 
""" - assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY + data = attestation.data + assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= data.slot + assert data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY - # Check target epoch, source epoch, and source root - target_epoch = slot_to_epoch(attestation.data.slot) - assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { - (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), - (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), + # Check target epoch, source epoch, source root, and source crosslink + target_epoch = slot_to_epoch(data.slot) + assert (target_epoch, data.source_epoch, data.source_root, data.source_crosslink) in { + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, state.current_crosslinks[data.shard]), + (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, state.previous_crosslinks[data.shard]), } - # Check crosslink data - assert attestation.data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] - crosslinks = state.current_crosslinks if slot_to_epoch(attestation.data.slot) == get_current_epoch(state) else state.previous_crosslinks - assert crosslinks[attestation.data.shard] == attestation.data.previous_crosslink + # Check crosslink data root + assert data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] # Check signature and bitfields assert verify_indexed_attestation(state, convert_to_indexed(state, attestation)) # Cache pending attestation pending_attestation = PendingAttestation( - data=attestation.data, + data=data, aggregation_bitfield=attestation.aggregation_bitfield, custody_bitfield=attestation.custody_bitfield, inclusion_slot=state.slot From a790afa8915b31a0d0ea798f1b87d6ccecd6cd5e Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 4 Apr 2019 08:23:48 +0400 Subject: [PATCH 181/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 65e48c69c..1e9aa4308 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1764,7 +1764,6 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe ```python def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: attestations = state.current_epoch_attestations if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_epoch_attestations - crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks valid_attestations = [a for a in attestations if a.data.shard == shard] all_roots = [a.data.crosslink_data_root for a in valid_attestations] @@ -1860,7 +1859,6 @@ def process_crosslinks(state: BeaconState) -> None: current_epoch = get_current_epoch(state) previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) next_epoch = current_epoch + 1 - next_previous_crosslinks = [crosslink for crosslink in state.current_crosslinks] for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): @@ -1873,7 +1871,7 @@ def process_crosslinks(state: 
BeaconState) -> None: crosslink_data_root=winning_root, ) - state.previous_crosslinks = next_previous_crosslinks + state.previous_crosslinks = state.current_crosslinks ``` #### Eth1 data From 3e6dc59ec056c88617cf6b40056da46667b06f13 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 4 Apr 2019 08:26:11 +0400 Subject: [PATCH 182/481] Update helpers.py --- tests/phase0/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index ead0a0489..afb03157a 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -172,7 +172,7 @@ def build_attestation_data(state, slot, shard): source_root=justified_block_root, target_root=epoch_boundary_root, crosslink_data_root=spec.ZERO_HASH, - previous_crosslink=deepcopy(crosslinks[shard]), + source_crosslink=deepcopy(crosslinks[shard]), ) From 5a77e61160965b6c3c1714aa0b9aa0aa2528e144 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 4 Apr 2019 08:11:12 -0700 Subject: [PATCH 183/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9bec7841f..3f690c888 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1041,9 +1041,12 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: ```python def get_crosslink_committee_for_attestation(state: BeaconState, attestation_data: AttestationData) -> List[ValidatorIndex]: - # Find the committee in the list with the desired shard + """ + Return the crosslink committee corresponding to ``attestation_data``. + """ crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - + + # Find the committee in the list with the desired shard assert attestation_data.shard in [shard for _, shard in crosslink_committees] crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] From 13fc498f7b57441c7604cbcab9319df4a8458b1e Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 5 Apr 2019 19:24:09 +1100 Subject: [PATCH 184/481] Update specs/test_formats/README.md Co-Authored-By: protolambda --- specs/test_formats/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index ad6424d8d..1173c06aa 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -93,7 +93,7 @@ title: -- Display name for the test suite summary: -- Summarizes the test suite forks_timeline: -- Used to determine the forking timeline forks: -- Runner decides what to do: run for each fork, or run for all at once, each fork transition, etc. - - ... + - ... 
config: -- Used to determine which set of constants to run (possibly compile time) with runner: *MUST be consistent with folder structure* handler: *MUST be consistent with folder structure* From 4bf20a1208438480b72b8ca920f31c381a383971 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 5 Apr 2019 19:29:58 +1100 Subject: [PATCH 185/481] remove confusing note --- specs/test_formats/README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 1173c06aa..d37b0b538 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -124,9 +124,6 @@ A fork timeline is (preferably) loaded in as a configuration object into a clien - we may decide on a slot number for a fork based on external events (e.g. Eth1 log event), a client should be able to activate a fork dynamically. -Note that phases are considered to be "super forks", - i.e. they will just have a fork name, and be more heavy on changes. - ## Config sourcing The constants configurations are located in: From dc325f7682cd4dec3270b1c2b76d638c3219bf91 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 5 Apr 2019 19:39:03 +1100 Subject: [PATCH 186/481] clean up a few things from PR --- Makefile | 2 +- specs/core/0_beacon-chain.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 88f17dcf9..5be5ce4fe 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ clean: # runs a limited set of tests against a minimal config # run pytest with `-m` option to full suite -test: +test: clean $(BUILD_DIR)/phase0 pytest -m minimal_config tests/ diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1e9aa4308..4e1bcab16 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1860,6 +1860,8 @@ def process_crosslinks(state: BeaconState) -> None: previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) next_epoch = current_epoch + 1 + state.previous_crosslinks = [crosslink for crosslink in state.current_crosslinks] + for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_root, participants = get_winning_root_and_participants(state, slot, shard) @@ -1870,8 +1872,6 @@ def process_crosslinks(state: BeaconState) -> None: epoch=min(slot_to_epoch(slot), state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), crosslink_data_root=winning_root, ) - - state.previous_crosslinks = state.current_crosslinks ``` #### Eth1 data From 09ceccacb4a7a4a3a8db5f14a40a663762ccd4ea Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 5 Apr 2019 22:31:37 +1100 Subject: [PATCH 187/481] consistent naming of network types --- specs/test_formats/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index d37b0b538..c572edac1 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -64,7 +64,7 @@ There are two types of fork-data: 2) coverage: what forks are covered by a test? The first is neat to have as a separate form: we prevent duplication, and can run with different presets - (e.g. fork timeline for a minimal local test, for a public testnet, or for main-net) + (e.g. 
fork timeline for a minimal local test, for a public testnet, or for mainnet) The second is still somewhat ambiguous: some tests may want cover multiple forks, and can do so in different ways: - run one test, transitioning from one to the other @@ -110,7 +110,7 @@ Separation of configuration and tests aims to: - prevent duplication of a minimal set of tests - make all tests easy to upgrade when a new config constant is introduced. - clearly define which constants to use -- share-able between clients, for cross-client short or long lived test-nets +- share-able between clients, for cross-client short or long lived testnets - minimize the amounts of different constants permutations to compile as a client. \** \**: Some clients prefer compile-time constants and optimizations. From 199e7849dafc48145dfbc6779ef2ccfa04e4a718 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 6 Apr 2019 04:07:26 -0500 Subject: [PATCH 188/481] Clarify lexicographic hash favoring (#881) --- specs/core/0_beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3f690c888..60e7864c2 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1664,6 +1664,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) children = get_children(store, head) if len(children) == 0: return head + # Ties broken by favoring block with lexicographically higher root head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) ``` From 169579ce3ba5e09572afd6a00c8ec7ae3211e11f Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 6 Apr 2019 20:45:11 +1100 Subject: [PATCH 189/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index bdce36d9b..deb53fd4b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -183,7 +183,8 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | -| `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | +| `MAX_EXITS_PER_EPOCH` | `2**2` (= 4) | +| `MAX_ACTIVATIONS_PER_FINALIZED_EPOCH` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) 
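To make the new `MAX_EXITS_PER_EPOCH` constant above concrete, here is a standalone sketch (illustrative only, not spec code) of the exit-queue throttling it is used for: once an exit epoch has been handed out `MAX_EXITS_PER_EPOCH` times, further exits spill over into the next epoch, mirroring the `exit_queue_filled` logic in the hunk that follows.

```python
MAX_EXITS_PER_EPOCH = 4  # illustrative value, matching the constants table above


def assign_exit_epochs(num_exits: int, earliest_exit_epoch: int) -> list:
    # Simulate the per-epoch exit rate limit: at most MAX_EXITS_PER_EPOCH
    # validators share the same exit_epoch before it is bumped by one.
    exit_epochs = []
    exit_epoch, filled = earliest_exit_epoch, 0
    for _ in range(num_exits):
        if filled >= MAX_EXITS_PER_EPOCH:
            exit_epoch += 1
            filled = 0
        exit_epochs.append(exit_epoch)
        filled += 1
    return exit_epochs


assert assign_exit_epochs(6, 10) == [10, 10, 10, 10, 11, 11]
```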
@@ -1384,7 +1385,7 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: if state.exit_epoch < delayed_activation_exit_epoch: state.exit_epoch = delayed_activation_exit_epoch - if state.exit_queue_filled >= MAX_EXIT_DEQUEUES_PER_EPOCH: + if state.exit_queue_filled >= MAX_EXITS_PER_EPOCH: state.exit_epoch += 1 state.exit_queue_filled = 0 @@ -2018,23 +2019,13 @@ Run the following function: ```python def update_registry(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - # Check if we should update, and if so, update + activation_queue = sorted([validator in enumerate(validators) if + validator.activation_epoch == FAR_FUTURE_EPOCH and + validator.activation_eligibility_epoch > get_delayed_activation_exit_epoch(state.finalized_epoch) + ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) - activations_since_finalization = len([index in state.validator_registry if - state.validator_registry[index].activation_epoch > state.finalized_epoch + ACTIVATION_EXIT_DELAY - ]) - if MAX_EXIT_DEQUEUES_PER_EPOCH > activations_since_finalization: - # Validator indices that could be activated - indices_for_activation = sorted( - filter( - lambda index: state.validator_registry[index].activation_epoch == FAR_FUTURE_EPOCH, - get_active_validator_indices(state.validator_registry, current_epoch), - ), - key=lambda index: state.validator_registry[index].activation_eligibility_epoch - ) - for index in indices_for_activation[:MAX_EXIT_DEQUEUES_PER_EPOCH - activations_since_finalization]: - activate_validator(state, index, is_genesis=False) + for index in activation_queue[:MAX_ACTIVATIONS_PER_FINALIZED_EPOCH]: + activate_validator(state, index, is_genesis=False) state.latest_start_shard = ( state.latest_start_shard + @@ -2042,8 +2033,6 @@ def update_registry(state: BeaconState) -> None: ) % SHARD_COUNT ``` -**Invariant**: the active index root that is hashed into the shuffling seed actually is the `hash_tree_root` of the validator set that is used for that epoch. 
- #### Slashings and exit queue Run `process_slashings(state)`: From 7f0a93fc3e095ea4ae1d9892934a89c33a8723ed Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 6 Apr 2019 21:07:03 +1100 Subject: [PATCH 190/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index deb53fd4b..4d054c2b6 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -122,8 +122,8 @@ - [Crosslinks](#crosslinks-1) - [Apply rewards](#apply-rewards) - [Balance-driven status transitions](#balance-driven-status-transitions) - - [Validator registry and start shard](#validator-registry-and-start-shard) - - [Slashings and exit queue](#slashings-and-exit-queue) + - [Activation queue and start shard](#activation-queue-and-start-shard) + - [Slashings](#slashings) - [Final updates](#final-updates) - [Per-slot processing](#per-slot-processing) - [Per-block processing](#per-block-processing) @@ -2013,7 +2013,7 @@ def process_balance_driven_status_transitions(state: BeaconState) -> None: initiate_validator_exit(state, index) ``` -#### Validator registry and start shard +#### Activation queue and start shard Run the following function: @@ -2021,7 +2021,8 @@ Run the following function: def update_registry(state: BeaconState) -> None: activation_queue = sorted([validator in enumerate(validators) if validator.activation_epoch == FAR_FUTURE_EPOCH and - validator.activation_eligibility_epoch > get_delayed_activation_exit_epoch(state.finalized_epoch) + validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and + validator.activation_eligibility_epoch > state.finalized_epoch ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) for index in activation_queue[:MAX_ACTIVATIONS_PER_FINALIZED_EPOCH]: @@ -2033,7 +2034,7 @@ def update_registry(state: BeaconState) -> None: ) % SHARD_COUNT ``` -#### Slashings and exit queue +#### Slashings Run `process_slashings(state)`: From 63412d9b9e2752280e48703adf262eb6b0285121 Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 6 Apr 2019 22:13:56 +1100 Subject: [PATCH 191/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4d054c2b6..f947c7d49 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1565,7 +1565,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], process_deposit(state, deposit) # Process genesis activations - for validator_index, _ in enumerate(state.validator_registry): + for validator_index in state.validator_registry: if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: activate_validator(state, validator_index, is_genesis=True) @@ -2019,7 +2019,7 @@ Run the following function: ```python def update_registry(state: BeaconState) -> None: - activation_queue = sorted([validator in enumerate(validators) if + activation_queue = sorted([validator for _, validator in enumerate(validators) if validator.activation_epoch == FAR_FUTURE_EPOCH and validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and validator.activation_eligibility_epoch > state.finalized_epoch From 5ea5746fdd09462a94d8ea68f1e3f777849e2aac Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 7 Apr 2019 01:24:50 +1100 Subject: [PATCH 192/481] Fix `get_genesis_beacon_state` and minor refactoring --- 
specs/core/0_beacon-chain.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f947c7d49..3e9189739 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -831,11 +831,11 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: ```python def get_split_offset(list_size: int, chunks: int, index: int) -> int: - """ - Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] - """ - return (list_size * index) // chunks + """ + Returns a value such that for a list L, chunk count k and index i, + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] + """ + return (list_size * index) // chunks ``` ### `get_epoch_committee_count` @@ -1042,7 +1042,7 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: ```python def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: + attestation_data: AttestationData) -> List[ValidatorIndex]: # Find the committee in the list with the desired shard crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) @@ -1565,7 +1565,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], process_deposit(state, deposit) # Process genesis activations - for validator_index in state.validator_registry: + for validator_index in range(len(state.validator_registry)): if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: activate_validator(state, validator_index, is_genesis=True) @@ -2019,7 +2019,8 @@ Run the following function: ```python def update_registry(state: BeaconState) -> None: - activation_queue = sorted([validator for _, validator in enumerate(validators) if + activation_queue = sorted([ + validator for validator in state.validator_registry if validator.activation_epoch == FAR_FUTURE_EPOCH and validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and validator.activation_eligibility_epoch > state.finalized_epoch From ebba3f5891fd8c686820f77d6f35f576f70d2b94 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 7 Apr 2019 01:59:10 +1100 Subject: [PATCH 193/481] Fix typo --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2501343ff..079971fc8 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1721,7 +1721,7 @@ def get_unslashed_attesting_indices(state: BeaconState, attestations: List[Pendi output = set() for a in attestations: output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) - return sorted(filter(lambda index: not state.validator_registry[index].is_slashed, list(output))) + return sorted(filter(lambda index: not state.validator_registry[index].slashed, list(output))) ``` ```python From 00872e0e9260fc63a80e9535d4949503ed8b71be Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 7 Apr 2019 02:09:07 +1100 Subject: [PATCH 194/481] Updated tests 1. No Validator.initiated_exit field. Fix the related tests. 2. Update state_transition flow - rename `process_ejections` to `process_balance_driven_status_transitions` and remove `process_exit_queue` 3. 
Linter, formatting --- .../test_process_attestation.py | 5 +-- .../test_process_attester_slashing.py | 1 - .../test_process_block_header.py | 2 +- .../test_process_proposer_slashing.py | 1 - .../block_processing/test_voluntary_exit.py | 38 +------------------ tests/phase0/helpers.py | 3 +- tests/phase0/test_sanity.py | 18 +++------ utils/phase0/state_transition.py | 3 +- 8 files changed, 13 insertions(+), 58 deletions(-) diff --git a/tests/phase0/block_processing/test_process_attestation.py b/tests/phase0/block_processing/test_process_attestation.py index ca6933ce7..1e8ee4488 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/tests/phase0/block_processing/test_process_attestation.py @@ -7,7 +7,6 @@ from build.phase0.state_transition import ( state_transition, ) from build.phase0.spec import ( - ZERO_HASH, get_current_epoch, process_attestation, slot_to_epoch, @@ -102,7 +101,7 @@ def test_bad_source_root(state): attestation = get_valid_attestation(state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - attestation.data.source_root = b'\x42'*32 + attestation.data.source_root = b'\x42' * 32 pre_state, post_state = run_attestation_processing(state, attestation, False) @@ -113,7 +112,7 @@ def test_non_zero_crosslink_data_root(state): attestation = get_valid_attestation(state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - attestation.data.crosslink_data_root = b'\x42'*32 + attestation.data.crosslink_data_root = b'\x42' * 32 pre_state, post_state = run_attestation_processing(state, attestation, False) diff --git a/tests/phase0/block_processing/test_process_attester_slashing.py b/tests/phase0/block_processing/test_process_attester_slashing.py index 06f214c4b..cf58ee244 100644 --- a/tests/phase0/block_processing/test_process_attester_slashing.py +++ b/tests/phase0/block_processing/test_process_attester_slashing.py @@ -31,7 +31,6 @@ def run_attester_slashing_processing(state, attester_slashing, valid=True): slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0] slashed_validator = post_state.validator_registry[slashed_index] - assert not slashed_validator.initiated_exit assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH diff --git a/tests/phase0/block_processing/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py index 4981b656c..508a70b19 100644 --- a/tests/phase0/block_processing/test_process_block_header.py +++ b/tests/phase0/block_processing/test_process_block_header.py @@ -54,7 +54,7 @@ def test_invalid_slot(state): def test_invalid_previous_block_root(state): block = build_empty_block_for_next_slot(state) - block.previous_block_root = b'\12'*32 # invalid prev root + block.previous_block_root = b'\12' * 32 # invalid prev root pre_state, post_state = run_block_header_processing(state, block, valid=False) return pre_state, block, None diff --git a/tests/phase0/block_processing/test_process_proposer_slashing.py b/tests/phase0/block_processing/test_process_proposer_slashing.py index 467d2164b..317829518 100644 --- a/tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/tests/phase0/block_processing/test_process_proposer_slashing.py @@ -30,7 +30,6 @@ def run_proposer_slashing_processing(state, proposer_slashing, valid=True): process_proposer_slashing(post_state, proposer_slashing) slashed_validator = post_state.validator_registry[proposer_slashing.proposer_index] - assert not 
slashed_validator.initiated_exit assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index 6adc81464..7627f1f0b 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -47,8 +47,8 @@ def test_success(state): # process_voluntary_exit(post_state, voluntary_exit) - assert not pre_state.validator_registry[validator_index].initiated_exit - assert post_state.validator_registry[validator_index].initiated_exit + assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH return pre_state, voluntary_exit, post_state @@ -111,37 +111,6 @@ def test_validator_already_exited(state): return pre_state, voluntary_exit, None -def test_validator_already_initiated_exit(state): - pre_state = deepcopy(state) - # - # setup pre_state - # - # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit - pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] - - # but validator already has initiated exit - pre_state.validator_registry[validator_index].initiated_exit = True - - # - # build voluntary exit - # - voluntary_exit = build_voluntary_exit( - pre_state, - current_epoch, - validator_index, - privkey, - ) - - with pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None - - def test_validator_not_active_long_enough(state): pre_state = deepcopy(state) # @@ -151,9 +120,6 @@ def test_validator_not_active_long_enough(state): validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] - # but validator already has initiated exit - pre_state.validator_registry[validator_index].initiated_exit = True - # # build voluntary exit # diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 33f394def..e60c7c64c 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -25,7 +25,6 @@ from build.phase0.spec import ( get_attestation_participants, get_block_root, get_crosslink_committee_for_attestation, - get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_empty_block, @@ -249,7 +248,7 @@ def get_valid_proposer_slashing(state): def get_valid_attester_slashing(state): attestation_1 = get_valid_attestation(state) attestation_2 = deepcopy(attestation_1) - attestation_2.data.target_root = b'\x01'*32 + attestation_2.data.target_root = b'\x01' * 32 return AttesterSlashing( attestation_1=convert_to_indexed(state, attestation_1), diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 90825242f..a2ce8928b 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -128,11 +128,9 @@ def test_proposer_slashing(state): block.body.proposer_slashings.append(proposer_slashing) state_transition(test_state, block) - assert not state.validator_registry[validator_index].initiated_exit assert not 
state.validator_registry[validator_index].slashed slashed_validator = test_state.validator_registry[validator_index] - assert not slashed_validator.initiated_exit assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH @@ -154,11 +152,9 @@ def test_attester_slashing(state): block.body.attester_slashings.append(attester_slashing) state_transition(test_state, block) - assert not state.validator_registry[validator_index].initiated_exit assert not state.validator_registry[validator_index].slashed slashed_validator = test_state.validator_registry[validator_index] - assert not slashed_validator.initiated_exit assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH @@ -316,9 +312,7 @@ def test_voluntary_exit(state): initiate_exit_block.body.voluntary_exits.append(voluntary_exit) state_transition(post_state, initiate_exit_block) - assert not pre_state.validator_registry[validator_index].initiated_exit - assert post_state.validator_registry[validator_index].initiated_exit - assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH # # Process within epoch transition @@ -350,8 +344,6 @@ def test_no_exit_churn_too_long_since_change(state): pre_state.validator_registry_update_epoch = ( get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH ) - # set validator to have previously initiated exit - pre_state.validator_registry[validator_index].initiated_exit = True post_state = deepcopy(pre_state) @@ -362,8 +354,10 @@ def test_no_exit_churn_too_long_since_change(state): block.slot += spec.SLOTS_PER_EPOCH state_transition(post_state, block) - assert post_state.validator_registry_update_epoch == get_current_epoch(post_state) - 1 + assert post_state.validator_registry[validator_index].activation_eligibility_epoch == spec.FAR_FUTURE_EPOCH assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + assert post_state.exit_queue_filled == pre_state.exit_queue_filled + assert post_state.exit_epoch == pre_state.exit_epoch return pre_state, [block], post_state @@ -419,7 +413,7 @@ def test_transfer(state): return pre_state, [block], post_state -def test_ejection(state): +def test_balance_driven_status_transitions(state): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) @@ -438,7 +432,7 @@ def test_ejection(state): block.slot += spec.SLOTS_PER_EPOCH state_transition(post_state, block) - assert post_state.validator_registry[validator_index].initiated_exit == True + assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH return pre_state, [block], post_state diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py index 2c420014f..3a3601ddb 100644 --- a/utils/phase0/state_transition.py +++ b/utils/phase0/state_transition.py @@ -94,10 +94,9 @@ def process_epoch_transition(state: BeaconState) -> None: spec.process_crosslinks(state) spec.maybe_reset_eth1_period(state) spec.apply_rewards(state) - spec.process_ejections(state) + spec.process_balance_driven_status_transitions(state) spec.update_registry(state) spec.process_slashings(state) - spec.process_exit_queue(state) spec.finish_epoch_update(state) From 47464f28dc014ea2bb2a20121a1353fc80180174 Mon Sep 17 00:00:00 2001 From: 
Justin Date: Sun, 7 Apr 2019 08:45:43 +1000 Subject: [PATCH 195/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 079971fc8..8a7b24cec 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1371,7 +1371,11 @@ def activate_validator(state: BeaconState, index: ValidatorIndex, is_genesis: bo """ validator = state.validator_registry[index] - validator.activation_epoch = GENESIS_EPOCH if is_genesis else get_delayed_activation_exit_epoch(get_current_epoch(state)) + if is_genesis: + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH + else: + validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) ``` #### `initiate_validator_exit` @@ -2027,9 +2031,8 @@ Run the following function: def update_registry(state: BeaconState) -> None: activation_queue = sorted([ validator for validator in state.validator_registry if - validator.activation_epoch == FAR_FUTURE_EPOCH and validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and - validator.activation_eligibility_epoch > state.finalized_epoch + validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch) ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) for index in activation_queue[:MAX_ACTIVATIONS_PER_FINALIZED_EPOCH]: From 4630b136dacba2689a1277de3fd4d7015eb0f132 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 7 Apr 2019 10:03:07 +1000 Subject: [PATCH 196/481] Fix/Remove pointless assertion --- tests/phase0/test_sanity.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index a2ce8928b..612227cfa 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -354,7 +354,6 @@ def test_no_exit_churn_too_long_since_change(state): block.slot += spec.SLOTS_PER_EPOCH state_transition(post_state, block) - assert post_state.validator_registry[validator_index].activation_eligibility_epoch == spec.FAR_FUTURE_EPOCH assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH assert post_state.exit_queue_filled == pre_state.exit_queue_filled assert post_state.exit_epoch == pre_state.exit_epoch From 846e2d61478b1f55fc8675a45953bbf7ecc4b335 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 7 Apr 2019 11:06:14 +1000 Subject: [PATCH 197/481] Remove `force_registry_change_at_next_epoch` --- tests/phase0/helpers.py | 8 -------- tests/phase0/test_sanity.py | 9 --------- 2 files changed, 17 deletions(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index e60c7c64c..34f2f8d7f 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -95,14 +95,6 @@ def create_genesis_state(num_validators, deposit_data_leaves=None): ) -def force_registry_change_at_next_epoch(state): - # artificially trigger registry update at next epoch transition - state.finalized_epoch = get_current_epoch(state) - 1 - for crosslink in state.latest_crosslinks: - crosslink.epoch = state.finalized_epoch - state.validator_registry_update_epoch = state.finalized_epoch - 1 - - def build_empty_block_for_next_slot(state): empty_block = get_empty_block() empty_block.slot = state.slot + 1 diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 612227cfa..7ed31b7bd 100644 --- a/tests/phase0/test_sanity.py +++ 
b/tests/phase0/test_sanity.py @@ -39,7 +39,6 @@ from build.phase0.utils.merkle_minimal import ( from tests.phase0.helpers import ( build_deposit_data, build_empty_block_for_next_slot, - force_registry_change_at_next_epoch, get_valid_attestation, get_valid_attester_slashing, get_valid_proposer_slashing, @@ -285,8 +284,6 @@ def test_voluntary_exit(state): # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - # artificially trigger registry update at next epoch transition - force_registry_change_at_next_epoch(pre_state) post_state = deepcopy(pre_state) @@ -338,12 +335,6 @@ def test_no_exit_churn_too_long_since_change(state): # # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - # artificially trigger registry update at next epoch transition - force_registry_change_at_next_epoch(pre_state) - # make epochs since registry update greater than LATEST_SLASHED_EXIT_LENGTH - pre_state.validator_registry_update_epoch = ( - get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH - ) post_state = deepcopy(pre_state) From 1c81638e8661292798e2cab03fc58d5dd319272c Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 12:07:10 +1000 Subject: [PATCH 198/481] forks are based on epoch numbers, as per spec --- specs/test_formats/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index c572edac1..54c478bb3 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -36,7 +36,7 @@ The particular formats of specific types of tests (test suites) are defined in s - `case`: a test case, an entry in the `test_cases` list of a `suite`. A case can be anything in general, but its format should be well-defined in the documentation corresponding to the `type` (and `handler`).\ A test has the same exact configuration and fork context as the other entries in the `case` list of its `suite`. -- `forks_timeline`: a fork timeline definition, a YAML file containing a key for each fork-name, and a slot number as value. +- `forks_timeline`: a fork timeline definition, a YAML file containing a key for each fork-name, and an epoch number as value. ## Test format philosophy @@ -44,8 +44,8 @@ The particular formats of specific types of tests (test suites) are defined in s After long discussion, the following types of configured constants were identified: - Never changing: genesis data -- Changing, but reliant on old value: e.g. a slot time may change, but if you want to do the conversion - `(genesis data, timestamp) -> slot number` you end up needing both constants. +- Changing, but reliant on old value: e.g. an epoch time may change, but if you want to do the conversion + `(genesis data, timestamp) -> epoch number` you end up needing both constants. - Changing, but kept around during fork transition: finalization may take a while, e.g. an executable has to deal with new deposits and old deposits at the same time. Another example may be economic constants. 
- Additional, back-wards compatible: new constants are introduced for later phases @@ -119,9 +119,9 @@ They should compile for each configuration once, and run the corresponding tests ## Fork-timeline A fork timeline is (preferably) loaded in as a configuration object into a client, as opposed to the constants configuration: - - we do not allocate or optimize any code based on slot numbers + - we do not allocate or optimize any code based on epoch numbers - when we transition from one fork to the other, it is preferred to stay online. - - we may decide on a slot number for a fork based on external events (e.g. Eth1 log event), + - we may decide on an epoch number for a fork based on external events (e.g. Eth1 log event), a client should be able to activate a fork dynamically. ## Config sourcing From c5ab5435a1f5cc155b8c99bd12af4630fb49e5fb Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 14:46:37 +1000 Subject: [PATCH 199/481] include example configs and fork timelines, with format spec --- configs/constant_presets/README.md | 20 ++++ configs/constant_presets/mainnet.yaml | 124 +++++++++++++++++++++++++ configs/constant_presets/minimal.yaml | 127 ++++++++++++++++++++++++++ configs/fork_timelines/README.md | 18 ++++ configs/fork_timelines/mainnet.yaml | 12 +++ specs/test_formats/README.md | 24 +++-- 6 files changed, 315 insertions(+), 10 deletions(-) create mode 100644 configs/constant_presets/README.md create mode 100644 configs/constant_presets/mainnet.yaml create mode 100644 configs/constant_presets/minimal.yaml create mode 100644 configs/fork_timelines/README.md create mode 100644 configs/fork_timelines/mainnet.yaml diff --git a/configs/constant_presets/README.md b/configs/constant_presets/README.md new file mode 100644 index 000000000..45148862e --- /dev/null +++ b/configs/constant_presets/README.md @@ -0,0 +1,20 @@ +# Constant Presets + +This directory contains a set of constants presets used for testing, testnets, and mainnet. + +A preset file contains all the constants known for its target. +Later-fork constants can be ignored, e.g. ignore phase1 constants as a client that only supports phase 0 currently. + +## Format + +Each preset is a key-value mapping. + +**Key**: an `UPPER_SNAKE_CASE` (a.k.a. "macro case") formatted string, name of the constant. +**Value**: can be any of: + - an unsigned integer number, can be up to 64 bits (incl.) + - a hexadecimal string, prefixed with `0x` + +Presets may contain comments to describe the values. + +See `mainnet.yaml` for a complete example. + diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml new file mode 100644 index 000000000..6eef9ad81 --- /dev/null +++ b/configs/constant_presets/mainnet.yaml @@ -0,0 +1,124 @@ +# Mainnet preset +# Note: the intention of this file (for now) is to illustrate what a mainnet configuration could look like. +# Some of these constants may still change before the launch of Phase 0. 
+ + +# Misc +# --------------------------------------------------------------- +# 2**10 ` (= 1,024) +SHARD_COUNT: 1024 +# 2**7 ` (= 128) +TARGET_COMMITTEE_SIZE: 128 +# 2**5 ` (= 32) +MAX_BALANCE_CHURN_QUOTIENT: 32 +# 2**12 ` (= 4,096) +MAX_ATTESTATION_PARTICIPANTS: 4096 +# 2**2 ` (= 4) +MAX_EXIT_DEQUEUES_PER_EPOCH: 4 +# See issue 563 +SHUFFLE_ROUND_COUNT: 90 + + +# Deposit contract +# --------------------------------------------------------------- +# **TBD** +DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123567890123456789012357890 +# 2**5 ` (= 32) +DEPOSIT_CONTRACT_TREE_DEPTH: 32 + + +# Gwei values +# --------------------------------------------------------------- +# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +MIN_DEPOSIT_AMOUNT: 1000000000 +# 2**5 * 10**9 ` (= 32,000,000,000) Gwei +MAX_DEPOSIT_AMOUNT: 32000000000 +# 2**4 * 10**9 ` (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +HIGH_BALANCE_INCREMENT: 1000000000 + + +# Initial values +# --------------------------------------------------------------- +GENESIS_FORK_VERSION: 0x00000000 +# 2**32, GENESIS_EPOCH is derived from this constant +GENESIS_SLOT: 4294967296 +GENESIS_START_SHARD: 0 +# 2**64 - 1 +FAR_FUTURE_EPOCH: 18446744073709551615 +BLS_WITHDRAWAL_PREFIX_BYTE: 0x00 + + +# Time parameters +# --------------------------------------------------------------- +# 6 seconds 6 seconds +SECONDS_PER_SLOT: 6 +# 2**2 ` (= 4) slots 24 seconds +MIN_ATTESTATION_INCLUSION_DELAY: 4 +# 2**6 ` (= 64) slots 6.4 minutes +SLOTS_PER_EPOCH: 64 +# 2**0 ` (= 1) epochs 6.4 minutes +MIN_SEED_LOOKAHEAD: 1 +# 2**2 ` (= 4) epochs 25.6 minutes +ACTIVATION_EXIT_DELAY: 4 +# 2**4 ` (= 16) epochs ~1.7 hours +EPOCHS_PER_ETH1_VOTING_PERIOD: 16 +# 2**13 ` (= 8,192) slots ~13 hours +SLOTS_PER_HISTORICAL_ROOT: 8192 +# 2**8 ` (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**11 ` (= 2,048) epochs 9 days +PERSISTENT_COMMITTEE_PERIOD: 2048 +# 2**6 ` (= 64) +MAX_CROSSLINK_EPOCHS: 64 + + +# State list lengths +# --------------------------------------------------------------- +# 2**13 ` (= 8,192) epochs ~36 days +LATEST_RANDAO_MIXES_LENGTH: 8192 +# 2**13 ` (= 8,192) epochs ~36 days +LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 8192 +# 2**13 ` (= 8,192) epochs ~36 days +LATEST_SLASHED_EXIT_LENGTH: 8192 + + +# Reward and penalty quotients +# --------------------------------------------------------------- +# 2**5 ` (= 32) +BASE_REWARD_QUOTIENT: 32 +# 2**9 ` (= 512) +WHISTLEBLOWING_REWARD_QUOTIENT: 512 +# 2**3 ` (= 8) +PROPOSER_REWARD_QUOTIENT: 8 +# 2**24 ` (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT: 16777216 + + +# Max operations per block +# --------------------------------------------------------------- +# 2**5 ` (= 32) +MIN_PENALTY_QUOTIENT: 32 +# 2**4 ` (= 16) +MAX_PROPOSER_SLASHINGS: 16 +# 2**0 ` (= 1) +MAX_ATTESTER_SLASHINGS: 1 +# 2**7 ` (= 128) +MAX_ATTESTATIONS: 128 +# 2**4 ` (= 16) +MAX_DEPOSITS: 16 +# 2**4 ` (= 16) +MAX_VOLUNTARY_EXITS: 16 +# 2**4 ` (= 16) +MAX_TRANSFERS: 16 + + +# Signature domains +# --------------------------------------------------------------- +DOMAIN_BEACON_BLOCK: 0 +DOMAIN_RANDAO: 1 +DOMAIN_ATTESTATION: 2 +DOMAIN_DEPOSIT: 3 +DOMAIN_VOLUNTARY_EXIT: 4 +DOMAIN_TRANSFER: 5 \ No newline at end of file diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml new file mode 100644 index 000000000..e4a99f303 --- /dev/null +++ b/configs/constant_presets/minimal.yaml @@ -0,0 +1,127 @@ +# Minimal preset + + +# Misc +# 
--------------------------------------------------------------- + + +# Just 8 shards in the minimal testing setup +SHARD_COUNT: 8 +# TODO: minimize other constants + + +# 2**7 ` (= 128) +TARGET_COMMITTEE_SIZE: 128 +# 2**5 ` (= 32) +MAX_BALANCE_CHURN_QUOTIENT: 32 +# 2**12 ` (= 4,096) +MAX_ATTESTATION_PARTICIPANTS: 4096 +# 2**2 ` (= 4) +MAX_EXIT_DEQUEUES_PER_EPOCH: 4 +# See issue 563 +SHUFFLE_ROUND_COUNT: 90 + + +# Deposit contract +# --------------------------------------------------------------- +# **TBD** +DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123567890123456789012357890 +# 2**5 ` (= 32) +DEPOSIT_CONTRACT_TREE_DEPTH: 32 + + +# Gwei values +# --------------------------------------------------------------- +# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +MIN_DEPOSIT_AMOUNT: 1000000000 +# 2**5 * 10**9 ` (= 32,000,000,000) Gwei +MAX_DEPOSIT_AMOUNT: 32000000000 +# 2**4 * 10**9 ` (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +HIGH_BALANCE_INCREMENT: 1000000000 + + +# Initial values +# --------------------------------------------------------------- +GENESIS_FORK_VERSION: 0x00000000 +# 2**32, GENESIS_EPOCH is derived from this constant +GENESIS_SLOT: 4294967296 +GENESIS_START_SHARD: 0 +# 2**64 - 1 +FAR_FUTURE_EPOCH: 18446744073709551615 +BLS_WITHDRAWAL_PREFIX_BYTE: 0x00 + + +# Time parameters +# --------------------------------------------------------------- +# 6 seconds 6 seconds +SECONDS_PER_SLOT: 6 +# 2**2 ` (= 4) slots 24 seconds +MIN_ATTESTATION_INCLUSION_DELAY: 4 +# 2**6 ` (= 64) slots 6.4 minutes +SLOTS_PER_EPOCH: 64 +# 2**0 ` (= 1) epochs 6.4 minutes +MIN_SEED_LOOKAHEAD: 1 +# 2**2 ` (= 4) epochs 25.6 minutes +ACTIVATION_EXIT_DELAY: 4 +# 2**4 ` (= 16) epochs ~1.7 hours +EPOCHS_PER_ETH1_VOTING_PERIOD: 16 +# 2**13 ` (= 8,192) slots ~13 hours +SLOTS_PER_HISTORICAL_ROOT: 8192 +# 2**8 ` (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 +# 2**11 ` (= 2,048) epochs 9 days +PERSISTENT_COMMITTEE_PERIOD: 2048 +# 2**6 ` (= 64) +MAX_CROSSLINK_EPOCHS: 64 + + +# State list lengths +# --------------------------------------------------------------- +# 2**13 ` (= 8,192) epochs ~36 days +LATEST_RANDAO_MIXES_LENGTH: 8192 +# 2**13 ` (= 8,192) epochs ~36 days +LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 8192 +# 2**13 ` (= 8,192) epochs ~36 days +LATEST_SLASHED_EXIT_LENGTH: 8192 + + +# Reward and penalty quotients +# --------------------------------------------------------------- +# 2**5 ` (= 32) +BASE_REWARD_QUOTIENT: 32 +# 2**9 ` (= 512) +WHISTLEBLOWING_REWARD_QUOTIENT: 512 +# 2**3 ` (= 8) +PROPOSER_REWARD_QUOTIENT: 8 +# 2**24 ` (= 16,777,216) +INACTIVITY_PENALTY_QUOTIENT: 16777216 + + +# Max operations per block +# --------------------------------------------------------------- +# 2**5 ` (= 32) +MIN_PENALTY_QUOTIENT: 32 +# 2**4 ` (= 16) +MAX_PROPOSER_SLASHINGS: 16 +# 2**0 ` (= 1) +MAX_ATTESTER_SLASHINGS: 1 +# 2**7 ` (= 128) +MAX_ATTESTATIONS: 128 +# 2**4 ` (= 16) +MAX_DEPOSITS: 16 +# 2**4 ` (= 16) +MAX_VOLUNTARY_EXITS: 16 +# 2**4 ` (= 16) +MAX_TRANSFERS: 16 + + +# Signature domains +# --------------------------------------------------------------- +DOMAIN_BEACON_BLOCK: 0 +DOMAIN_RANDAO: 1 +DOMAIN_ATTESTATION: 2 +DOMAIN_DEPOSIT: 3 +DOMAIN_VOLUNTARY_EXIT: 4 +DOMAIN_TRANSFER: 5 \ No newline at end of file diff --git a/configs/fork_timelines/README.md b/configs/fork_timelines/README.md new file mode 100644 index 000000000..c93b415f5 --- /dev/null +++ b/configs/fork_timelines/README.md @@ -0,0 +1,18 @@ +# Fork timelines + +This directory contains a set of fork 
timelines used for testing, testnets, and mainnet. + +A timeline file contains all the forks known for its target. +Later forks can be ignored, e.g. ignore fork `phase1` as a client that only supports phase 0 currently. + +## Format + +Each preset is a key-value mapping. + +**Key**: an `lower_snake_case` (a.k.a. "python case") formatted string, name of the fork. +**Value**: an unsigned integer number, epoch number of activation of the fork + +Timelines may contain comments to describe the values. + +See `mainnet.yaml` for a complete example. + diff --git a/configs/fork_timelines/mainnet.yaml b/configs/fork_timelines/mainnet.yaml new file mode 100644 index 000000000..8d51d6582 --- /dev/null +++ b/configs/fork_timelines/mainnet.yaml @@ -0,0 +1,12 @@ +# Mainnet fork timeline + +# Equal to GENESIS_EPOCH +phase0: 67108864 + +# Example 1: +# phase0_funny_fork_name: 67116000 + +# Example 2: +# Should be equal to PHASE_1_GENESIS_EPOCH +# (placeholder in example value here) +# phase1: 67163000 diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 54c478bb3..371c489b6 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -107,14 +107,16 @@ test_cases: A configuration is a separate YAML file. Separation of configuration and tests aims to: -- prevent duplication of a minimal set of tests -- make all tests easy to upgrade when a new config constant is introduced. -- clearly define which constants to use -- share-able between clients, for cross-client short or long lived testnets -- minimize the amounts of different constants permutations to compile as a client. \** - -\**: Some clients prefer compile-time constants and optimizations. -They should compile for each configuration once, and run the corresponding tests per build target. +- Prevent duplication of configuration +- Make all tests easy to upgrade (e.g. when a new config constant is introduced) +- Clearly define which constants to use +- Shareable between clients, for cross-client short or long lived testnets +- Minimize the amounts of different constants permutations to compile as a client. + Note: Some clients prefer compile-time constants and optimizations. + They should compile for each configuration once, and run the corresponding tests per build target. + +The format is described in `configs/constant_presets`. + ## Fork-timeline @@ -124,18 +126,20 @@ A fork timeline is (preferably) loaded in as a configuration object into a clien - we may decide on an epoch number for a fork based on external events (e.g. Eth1 log event), a client should be able to activate a fork dynamically. +The format is described in `configs/fork_timelines`. 
+ ## Config sourcing The constants configurations are located in: ``` -/configs/constants/.yaml +/configs/constant_presets/.yaml ``` And copied by CI for testing purposes to: ``` -/configs/constants/.yaml +/configs/constant_presets/.yaml ``` From c5d2696feb0d30a1c8c2b084c3dc3f72e1371c83 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 16:17:42 +1000 Subject: [PATCH 200/481] include minimal testing constants from previous pytests --- configs/constant_presets/minimal.yaml | 33 ++++++++++++--------------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index e4a99f303..e4c869ded 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -4,14 +4,11 @@ # Misc # --------------------------------------------------------------- - -# Just 8 shards in the minimal testing setup +# [customized] Just 8 shards for testing purposes SHARD_COUNT: 8 -# TODO: minimize other constants - -# 2**7 ` (= 128) -TARGET_COMMITTEE_SIZE: 128 +# [customized] unsecure, but fast +TARGET_COMMITTEE_SIZE: 4 # 2**5 ` (= 32) MAX_BALANCE_CHURN_QUOTIENT: 32 # 2**12 ` (= 4,096) @@ -57,18 +54,18 @@ BLS_WITHDRAWAL_PREFIX_BYTE: 0x00 # --------------------------------------------------------------- # 6 seconds 6 seconds SECONDS_PER_SLOT: 6 -# 2**2 ` (= 4) slots 24 seconds -MIN_ATTESTATION_INCLUSION_DELAY: 4 -# 2**6 ` (= 64) slots 6.4 minutes -SLOTS_PER_EPOCH: 64 +# [customized] 2 slots +MIN_ATTESTATION_INCLUSION_DELAY: 2 +# [customized] fast epochs +SLOTS_PER_EPOCH: 8 # 2**0 ` (= 1) epochs 6.4 minutes MIN_SEED_LOOKAHEAD: 1 # 2**2 ` (= 4) epochs 25.6 minutes ACTIVATION_EXIT_DELAY: 4 # 2**4 ` (= 16) epochs ~1.7 hours EPOCHS_PER_ETH1_VOTING_PERIOD: 16 -# 2**13 ` (= 8,192) slots ~13 hours -SLOTS_PER_HISTORICAL_ROOT: 8192 +# [customized] smaller state +SLOTS_PER_HISTORICAL_ROOT: 64 # 2**8 ` (= 256) epochs ~27 hours MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 # 2**11 ` (= 2,048) epochs 9 days @@ -79,12 +76,12 @@ MAX_CROSSLINK_EPOCHS: 64 # State list lengths # --------------------------------------------------------------- -# 2**13 ` (= 8,192) epochs ~36 days -LATEST_RANDAO_MIXES_LENGTH: 8192 -# 2**13 ` (= 8,192) epochs ~36 days -LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 8192 -# 2**13 ` (= 8,192) epochs ~36 days -LATEST_SLASHED_EXIT_LENGTH: 8192 +# [customized] smaller state +LATEST_RANDAO_MIXES_LENGTH: 64 +# [customized] smaller state +LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 64 +# [customized] smaller state +LATEST_SLASHED_EXIT_LENGTH: 64 # Reward and penalty quotients From 117e157f29928f3fd93ed57fd4fdc649e88aff43 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 16:21:50 +1000 Subject: [PATCH 201/481] update comment, fix net naming --- specs/core/0_beacon-chain.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0beb6ca9f..ecf0434a8 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -178,8 +178,9 @@ Code snippets appearing in `this style` are to be interpreted as Python code. ## Constants -Note: the default main-net values for the constants are included here for illustrative purposes. -The different configurations for main-net, test-nets, and yaml-based testing can be found in the `configs/contants/` directory. +Note: the default mainnet values for the constants are included here for spec-design purposes. 
+The different configurations for mainnet, testnets, and yaml-based testing can be found in the `configs/constant_presets/` directory. +These configurations are updated for releases, but may be out of sync during `dev` changes. ### Misc From 9f329956930b66bb4a92390713c4c8136ab89fe4 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 16:32:48 +1000 Subject: [PATCH 202/481] enable generator to locate configurations --- test_libs/gen_helpers/gen_base/gen_runner.py | 10 +++++++++- test_libs/gen_helpers/gen_base/gen_typing.py | 3 ++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/test_libs/gen_helpers/gen_base/gen_runner.py b/test_libs/gen_helpers/gen_base/gen_runner.py index ad729449a..4c3f69bb1 100644 --- a/test_libs/gen_helpers/gen_base/gen_runner.py +++ b/test_libs/gen_helpers/gen_base/gen_runner.py @@ -55,6 +55,13 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): default=False, help="if set overwrite test files if they exist", ) + parser.add_argument( + "-c", + "--configs-path", + dest="configs_path", + default=True, + help="specify the path of the configs directory (containing constants_presets and fork_timelines)", + ) args = parser.parse_args() output_dir = args.output_dir @@ -66,8 +73,9 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): yaml = YAML(pure=True) print(f"Generating tests for {generator_name}, creating {len(suite_creators)} test suite files...") + print(f"Reading config presets and fork timelines from {args.configs_path}") for suite_creator in suite_creators: - suite = suite_creator() + suite = suite_creator(args.configs_path) filename = make_filename_for_test(suite) path = output_dir / filename diff --git a/test_libs/gen_helpers/gen_base/gen_typing.py b/test_libs/gen_helpers/gen_base/gen_typing.py index 1384c870f..d6bd679af 100644 --- a/test_libs/gen_helpers/gen_base/gen_typing.py +++ b/test_libs/gen_helpers/gen_base/gen_typing.py @@ -2,4 +2,5 @@ from typing import Callable, Dict, Any TestCase = Dict[str, Any] TestSuite = Dict[str, Any] -TestSuiteCreator = Callable[[], TestSuite] +# Args: +TestSuiteCreator = Callable[[str], TestSuite] From 9eb640dd3b5f95f8a2e41ef49462cbb8a0bd58ab Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 17:02:20 +1000 Subject: [PATCH 203/481] intro configuration support pkg --- scripts/phase0/build_spec.py | 21 ++++++++++++++----- scripts/phase0/function_puller.py | 3 ++- test_libs/config_helpers/README.md | 19 +++++++++++++++++ .../config_helpers/preset_loader/__init__.py | 0 .../config_helpers/preset_loader/loader.py | 18 ++++++++++++++++ test_libs/config_helpers/requirements.txt | 1 + test_libs/config_helpers/setup.py | 9 ++++++++ test_libs/gen_helpers/setup.py | 1 - 8 files changed, 65 insertions(+), 7 deletions(-) create mode 100644 test_libs/config_helpers/README.md create mode 100644 test_libs/config_helpers/preset_loader/__init__.py create mode 100644 test_libs/config_helpers/preset_loader/loader.py create mode 100644 test_libs/config_helpers/requirements.txt create mode 100644 test_libs/config_helpers/setup.py diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index fa7d1fb68..98b1cad78 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -2,13 +2,14 @@ import sys import function_puller -def build_spec(sourcefile, outfile): +def build_phase0_spec(sourcefile, outfile): code_lines = [] code_lines.append(""" from typing import ( Any, Callable, + Dict, List, NewType, Tuple, @@ -41,7 +42,7 @@ Any = 
None Store = None """) - code_lines += function_puller.get_lines(sourcefile) + code_lines += function_puller.get_spec(sourcefile) code_lines.append(""" # Monkey patch validator get committee code @@ -78,7 +79,16 @@ def hash(x): ret = _hash(x) hash_cache[x] = ret return ret - """) + +# Access to overwrite spec constants based on configuration +def apply_constants_preset(preset: Dict[str, Any]): + global_vars = globals() + for k, v in preset: + global_vars[k] = v + + # Deal with derived constants + GENESIS_EPOCH = slot_to_epoch(GENESIS_SLOT) +""") with open(outfile, 'w') as out: out.write("\n".join(code_lines)) @@ -86,5 +96,6 @@ def hash(x): if __name__ == '__main__': if len(sys.argv) < 3: - print("Error: spec source and outfile must defined") - build_spec(sys.argv[1], sys.argv[2]) + print("Usage: ") + build_phase0_spec(sys.argv[1], sys.argv[2]) + diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py index 2cd0139c5..d0f3f66f1 100644 --- a/scripts/phase0/function_puller.py +++ b/scripts/phase0/function_puller.py @@ -1,7 +1,8 @@ import sys +from typing import List -def get_lines(file_name): +def get_spec(file_name) -> List[str]: code_lines = [] pulling_from = None current_name = None diff --git a/test_libs/config_helpers/README.md b/test_libs/config_helpers/README.md new file mode 100644 index 000000000..184482082 --- /dev/null +++ b/test_libs/config_helpers/README.md @@ -0,0 +1,19 @@ +# ETH 2.0 config helpers + +`preset_loader`: A util to load constants-presets with. +See [Constants-presets documentation](../../configs/constants_presets/README.md). + +Usage: + +```python +configs_path = 'configs/' + +... + +import preset_loader +from eth2spec.phase0 import spec +my_presets = preset_loader.load_presets(configs_path, 'main_net') +spec.apply_constants_preset(my_presets) +``` + +WARNING: this overwrites globals, make sure to prevent accidental collisions with other usage of the same imported specs package. diff --git a/test_libs/config_helpers/preset_loader/__init__.py b/test_libs/config_helpers/preset_loader/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/config_helpers/preset_loader/loader.py b/test_libs/config_helpers/preset_loader/loader.py new file mode 100644 index 000000000..043c58805 --- /dev/null +++ b/test_libs/config_helpers/preset_loader/loader.py @@ -0,0 +1,18 @@ +from typing import Dict, Any + +from ruamel.yaml import ( + YAML, +) +from pathlib import Path +from os.path import join + + +def load_presets(configs_dir, presets_name) -> Dict[str, Any]: + """ + Loads the given preset + :param presets_name: The name of the generator. 
(lowercase snake_case) + :return: Dictionary, mapping of constant-name -> constant-value + """ + path = Path(join(configs_dir, 'constant_presets', presets_name+'.yaml')) + yaml = YAML(typ='safe') + return yaml.load(path) diff --git a/test_libs/config_helpers/requirements.txt b/test_libs/config_helpers/requirements.txt new file mode 100644 index 000000000..e441a474b --- /dev/null +++ b/test_libs/config_helpers/requirements.txt @@ -0,0 +1 @@ +ruamel.yaml==0.15.87 diff --git a/test_libs/config_helpers/setup.py b/test_libs/config_helpers/setup.py new file mode 100644 index 000000000..90ad94ee4 --- /dev/null +++ b/test_libs/config_helpers/setup.py @@ -0,0 +1,9 @@ +from distutils.core import setup + +setup( + name='config_helpers', + packages=['preset_loader'], + install_requires=[ + "ruamel.yaml==0.15.87" + ] +) diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py index 88b971bf3..5de27a6db 100644 --- a/test_libs/gen_helpers/setup.py +++ b/test_libs/gen_helpers/setup.py @@ -2,7 +2,6 @@ from distutils.core import setup setup( name='gen_helpers', - version='1.0', packages=['gen_base'], install_requires=[ "ruamel.yaml==0.15.87", From c350aaecf7ca1214ce196bcbd079751e389abd8c Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 17:26:24 +1000 Subject: [PATCH 204/481] small format update, support new testing format in generator base pkg --- configs/fork_timelines/testing.yaml | 6 ++ specs/test_formats/README.md | 18 +++--- test_generators/README.md | 68 +++++++++++++-------- test_libs/gen_helpers/gen_base/gen_suite.py | 19 +++--- 4 files changed, 68 insertions(+), 43 deletions(-) create mode 100644 configs/fork_timelines/testing.yaml diff --git a/configs/fork_timelines/testing.yaml b/configs/fork_timelines/testing.yaml new file mode 100644 index 000000000..957a53b8c --- /dev/null +++ b/configs/fork_timelines/testing.yaml @@ -0,0 +1,6 @@ +# Testing fork timeline + +# Equal to GENESIS_EPOCH +phase0: 536870912 + +# No other forks considered in testing yet (to be implemented) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 371c489b6..f5d78193d 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -89,14 +89,14 @@ The aim is to provide clients with a well-defined scope of work to run a particu ## Test Suite ``` -title: -- Display name for the test suite -summary: -- Summarizes the test suite -forks_timeline: -- Used to determine the forking timeline -forks: -- Runner decides what to do: run for each fork, or run for all at once, each fork transition, etc. - - ... -config: -- Used to determine which set of constants to run (possibly compile time) with -runner: *MUST be consistent with folder structure* -handler: *MUST be consistent with folder structure* +title: -- Display name for the test suite +summary: -- Summarizes the test suite +forks_timeline: -- Used to determine the forking timeline +forks: -- Runner decides what to do: run for each fork, or run for all at once, each fork transition, etc. + - ... +config: -- Used to determine which set of constants to run (possibly compile time) with +runner: *MUST be consistent with folder structure* +handler: *MUST be consistent with folder structure* test_cases: ... @@ -163,7 +163,7 @@ To prevent parsing of hundreds of different YAML files to test a specific test t ``` . 
<--- root of eth2.0 tests repository ├── bls <--- collection of handler for a specific test-runner, example runner: "bls" -│   ├── signing <--- collection of test suites for a specific handler, example handler: "signing". If no handler, use a dummy folder "main" +│   ├── signing <--- collection of test suites for a specific handler, example handler: "signing". If no multiple handlers, use a dummy folder (e.g. "main"), and specify that in the yaml. │   │   ├── sign_msg.yml <--- an entry list of test suites │   │   ... <--- more suite files (optional) │   ... <--- more handlers diff --git a/test_generators/README.md b/test_generators/README.md index bacb7229a..c7c77c961 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -59,11 +59,12 @@ Create a `requirements.txt` in the root of your generator directory: ``` eth-utils==1.4.1 ../../test_libs/gen_helpers -``` -And optionally, to include pyspec, add: -``` +../../test_libs/config_helpers ../../test_libs/pyspec ``` +The config helper and pyspec is optional, but preferred. We encourage generators to derive tests from the spec itself, to prevent code duplication and outdated tests. +Applying configurations to the spec is easy, and enables you to create test suites with different contexts. + Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement. Install all the necessary requirements (re-run when you add more): @@ -82,45 +83,60 @@ from eth_utils import ( to_dict, to_tuple ) +from preset_loader import loader +from eth2spec.phase0 import spec @to_dict -def bar_test_case(v: int): - yield "bar_v", v - yield "bar_v_plus_1", v + 1 - yield "bar_list", list(range(v)) +def example_test_case(v: int): + yield "spec_SHARD_COUNT", spec.SHARD_COUNT + yield "example", v @to_tuple -def generate_bar_test_cases(): +def generate_example_test_cases(): for i in range(10): - yield bar_test_case(i) + yield example_test_case(i) -def bar_test_suite() -> gen_typing.TestSuite: +def example_minimal_suite(configs_path: str) -> gen_typing.TestSuite: + presets = loader.load_presets(configs_path, 'minimal') + spec.apply_constants_preset(presets) + return gen_suite.render_suite( - title="bar_minimal", + title="example_minimal", summary="Minimal example suite, testing bar.", - fork="v0.5.1", + forks_timeline="testing", + forks=["phase0"], config="minimal", - test_cases=generate_bar_test_cases()) + handler="main", + test_cases=generate_example_test_cases()) + + +def example_mainnet_suite(configs_path: str) -> gen_typing.TestSuite: + presets = loader.load_presets(configs_path, 'mainnet') + spec.apply_constants_preset(presets) + + return gen_suite.render_suite( + title="example_main_net", + summary="Main net based example suite.", + forks_timeline= "mainnet", + forks=["phase0"], + config="testing", + handler="main", + test_cases=generate_example_test_cases()) if __name__ == "__main__": - gen_runner.run_generator("foo", [bar_test_suite]) - -``` - -And to use the pyspec: - -``` -from eth2spec.phase0 import spec + gen_runner.run_generator("example", [example_minimal_suite, example_mainnet_suite]) ``` Recommendations: -- you can have more than just 1 generator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])` +- you can have more than just 1 suite creator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])` - you can concatenate lists of test cases, if you don't want to split it up in suites. 
-- you can split your suite generators into different python files/packages, good for code organization. -- use config "minimal" for performance. But also implement a suite with the default config where necessary +- you can split your suite creators into different python files/packages, good for code organization. +- use config "minimal" for performance. But also implement a suite with the default config where necessary. +- you may be able to write your test suite creator in a way where it does not make assumptions on constants. + If so, you can generate test suites with different configurations for the same scenario (see example). - the test-generator accepts `--output` and `--force` (overwrite output) ## How to add a new test generator @@ -133,13 +149,13 @@ In order to add a new test generator that builds `New Tests`: with any dependencies it may need. Leave it empty if your generator has none. 3. Your generator is assumed to have a `main.py` file in its root. By adding the base generator to your requirements, you can make a generator really easily. See docs below. -4. Your generator is called with `-o some/file/path/for_testing/can/be_anything`. +4. Your generator is called with `-o some/file/path/for_testing/can/be_anything -c some/other/path/to_configs/`. The base generator helps you handle this; you only have to define suite headers, and a list of tests for each suite you generate. 5. Finally, add any linting or testing commands to the [circleci config file](https://github.com/ethereum/eth2.0-test-generators/blob/master/.circleci/config.yml) if desired to increase code quality. - + Note: you do not have to change the makefile. However, if necessary (e.g. not using python, or mixing in other languages), submit an issue, and it can be a special case. Do note that generators should be easy to maintain, lean, and based on the spec. 
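For completeness, a minimal sketch of the consuming side, i.e. how a client-side test runner might load one generated suite file and dispatch on its header fields. This is illustrative only and not part of the repository: `load_suite`, `run_suite`, the example path, and the handler wiring are all hypothetical; the only assumed dependency is `ruamel.yaml`, which the helper packages above already require.

```python
from pathlib import Path
from typing import Any, Callable, Dict

from ruamel.yaml import YAML


def load_suite(path: str) -> Dict[str, Any]:
    # One YAML document per suite file, as described in the test format docs.
    yaml = YAML(typ='safe')
    return yaml.load(Path(path))


def run_suite(suite: Dict[str, Any],
              handlers: Dict[str, Callable[[Dict[str, Any]], None]]) -> None:
    # The suite header tells the runner which constants preset and fork context
    # to set up before executing the cases; here we only report it and dispatch
    # each test case to the function registered for the suite's handler name.
    print(f"suite: {suite['title']} (config={suite['config']}, forks={suite['forks']})")
    handler_fn = handlers[suite['handler']]
    for case in suite['test_cases']:
        handler_fn(case)


# Hypothetical wiring, e.g. for a "bls"/"signing" suite:
# run_suite(load_suite('tests/bls/signing/sign_msg.yml'), {'signing': my_sign_msg_handler})
```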
diff --git a/test_libs/gen_helpers/gen_base/gen_suite.py b/test_libs/gen_helpers/gen_base/gen_suite.py index fdfac8292..3459d9ae3 100644 --- a/test_libs/gen_helpers/gen_base/gen_suite.py +++ b/test_libs/gen_helpers/gen_base/gen_suite.py @@ -1,17 +1,20 @@ from typing import Iterable -from eth_utils import ( - to_dict, -) - +from eth_utils import to_dict from gen_base.gen_typing import TestCase @to_dict -def render_suite(*, title: str, summary: str, fork: str, config: str, test_cases: Iterable[TestCase]): +def render_suite(*, + title: str, summary: str, + forks_timeline: str, forks: Iterable[str], + config: str, + handler: str, + test_cases: Iterable[TestCase]): yield "title", title - if summary is not None: - yield "summary", summary - yield "fork", fork + yield "summary", summary + yield "forks_timeline", forks_timeline, + yield "forks", forks yield "config", config + yield "handler", handler yield "test_cases", test_cases From 8fdae5bced7fb05fe6a5af6892862ef701e9719a Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 17:50:39 +1000 Subject: [PATCH 205/481] support test format file structure in base generator --- test_libs/gen_helpers/gen_base/gen_runner.py | 50 +++++++++++++++----- test_libs/gen_helpers/gen_base/gen_typing.py | 6 ++- 2 files changed, 41 insertions(+), 15 deletions(-) diff --git a/test_libs/gen_helpers/gen_base/gen_runner.py b/test_libs/gen_helpers/gen_base/gen_runner.py index 4c3f69bb1..8a179b089 100644 --- a/test_libs/gen_helpers/gen_base/gen_runner.py +++ b/test_libs/gen_helpers/gen_base/gen_runner.py @@ -1,5 +1,5 @@ import argparse -import pathlib +from pathlib import Path import sys from typing import List @@ -10,14 +10,8 @@ from ruamel.yaml import ( from gen_base.gen_typing import TestSuiteCreator -def make_filename_for_test(test): - title = test["title"] - filename = title.lower().replace(" ", "_") + ".yaml" - return pathlib.Path(filename) - - def validate_output_dir(path_str): - path = pathlib.Path(path_str) + path = Path(path_str) if not path.exists(): raise argparse.ArgumentTypeError("Output directory must exist") @@ -28,6 +22,30 @@ def validate_output_dir(path_str): return path +def validate_configs_dir(path_str): + path = Path(path_str) + + if not path.exists(): + raise argparse.ArgumentTypeError("Configs directory must exist") + + if not path.is_dir(): + raise argparse.ArgumentTypeError("Config path must lead to a directory") + + if not Path(path, "constant_presets").exists(): + raise argparse.ArgumentTypeError("Constant Presets directory must exist") + + if not Path(path, "constant_presets").is_dir(): + raise argparse.ArgumentTypeError("Constant Presets path must lead to a directory") + + if not Path(path, "fork_timelines").exists(): + raise argparse.ArgumentTypeError("Fork Timelines directory must exist") + + if not Path(path, "fork_timelines").is_dir(): + raise argparse.ArgumentTypeError("Fork Timelines path must lead to a directory") + + return path + + def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): """ Implementation for a general test generator. 
@@ -59,7 +77,8 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): "-c", "--configs-path", dest="configs_path", - default=True, + required=True, + type=validate_configs_dir, help="specify the path of the configs directory (containing constants_presets and fork_timelines)", ) @@ -75,13 +94,18 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): print(f"Generating tests for {generator_name}, creating {len(suite_creators)} test suite files...") print(f"Reading config presets and fork timelines from {args.configs_path}") for suite_creator in suite_creators: - suite = suite_creator(args.configs_path) + (output_name, handler, suite) = suite_creator(args.configs_path) - filename = make_filename_for_test(suite) - path = output_dir / filename + handler_output_dir = Path(output_dir) / Path(handler) + try: + handler_output_dir.mkdir() + except FileNotFoundError as e: + sys.exit(f'Error when creating handler dir {handler} for test "{suite["title"]}" ({e})') + + out_path = handler_output_dir / Path(output_name + '.yaml') try: - with path.open(file_mode) as f: + with out_path.open(file_mode) as f: yaml.dump(suite, f) except IOError as e: sys.exit(f'Error when dumping test "{suite["title"]}" ({e})') diff --git a/test_libs/gen_helpers/gen_base/gen_typing.py b/test_libs/gen_helpers/gen_base/gen_typing.py index d6bd679af..1cb315315 100644 --- a/test_libs/gen_helpers/gen_base/gen_typing.py +++ b/test_libs/gen_helpers/gen_base/gen_typing.py @@ -1,6 +1,8 @@ -from typing import Callable, Dict, Any +from typing import Callable, Dict, Tuple, Any TestCase = Dict[str, Any] TestSuite = Dict[str, Any] +# Tuple: (output name, handler name, suite) -- output name excl. ".yaml" +TestSuiteOutput = Tuple[str, str, TestSuite] # Args: -TestSuiteCreator = Callable[[str], TestSuite] +TestSuiteCreator = Callable[[str], TestSuiteOutput] From 42dc003e911ff98d3eede0238e4c2f469426a4a3 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 7 Apr 2019 17:55:38 +1000 Subject: [PATCH 206/481] add previous_crosslink_root and enforce crosslinks form a chain --- specs/core/0_beacon-chain.md | 36 ++++++++++------- .../test_process_crosslinks.py | 39 ++++++++++++++++++- tests/phase0/helpers.py | 3 +- 3 files changed, 61 insertions(+), 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 843896629..dd003f83e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -316,6 +316,8 @@ The types are defined topologically to aid in facilitating an executable version 'epoch': 'uint64', # Shard data since the previous crosslink 'crosslink_data_root': 'bytes32', + # Root of the previous crosslink + 'previous_crosslink_root': 'bytes32', } ``` @@ -358,7 +360,7 @@ The types are defined topologically to aid in facilitating an executable version # Crosslink vote 'shard': 'uint64', - 'source_crosslink': Crosslink, + 'source_crosslink_root': 'bytes32', 'crosslink_data_root': 'bytes32', } ``` @@ -1565,8 +1567,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], finalized_root=ZERO_HASH, # Recent state - current_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), - previous_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + current_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + 
previous_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), @@ -1771,24 +1773,28 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe **Note**: Total balances computed for the previous epoch might be marginally different than the actual total balances during the previous epoch transition. Due to the tight bound on validator churn each epoch and small per-epoch rewards/penalties, the potential balance difference is very low and only marginally affects consensus safety. ```python -def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: +def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Shard) -> Tuple[Bytes32, Bytes32, List[ValidatorIndex]]: attestations = state.current_epoch_attestations if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_epoch_attestations valid_attestations = [a for a in attestations if a.data.shard == shard] - all_roots = [a.data.crosslink_data_root for a in valid_attestations] + all_roots = [(a.data.crosslink_data_root, a.data.source_crosslink_root) for a in valid_attestations] # handle when no attestations for shard available if len(all_roots) == 0: - return ZERO_HASH, [] + return ZERO_HASH, ZERO_HASH, [] def get_attestations_for(root) -> List[PendingAttestation]: return [a for a in valid_attestations if a.data.crosslink_data_root == root] # Winning crosslink root is the root with the most votes for it, ties broken in favor of # lexicographically higher hash - winning_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r)), r)) + winning_root, previous_crosslink_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r[0])), r[0])) - return winning_root, get_attesting_indices(state, get_attestations_for(winning_root)) + return ( + winning_root, + previous_crosslink_root, + get_attesting_indices(state, get_attestations_for(winning_root)), + ) ``` ```python @@ -1873,13 +1879,15 @@ def process_crosslinks(state: BeaconState) -> None: for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, slot, shard) + winning_root, previous_crosslink_root, participants = get_winning_root_and_participants(state, slot, shard) + expected_crosslink_root = hash_tree_root(state.current_crosslinks[shard]) participating_balance = get_total_balance(state, participants) total_balance = get_total_balance(state, crosslink_committee) - if 3 * participating_balance >= 2 * total_balance: + if previous_crosslink_root == expected_crosslink_root and 3 * participating_balance >= 2 * total_balance: state.current_crosslinks[shard] = Crosslink( epoch=min(slot_to_epoch(slot), state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), crosslink_data_root=winning_root, + previous_crosslink_root=previous_crosslink_root, ) ``` @@ -1987,7 +1995,7 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: current_epoch_start_slot = 
get_epoch_start_slot(get_current_epoch(state)) for slot in range(previous_epoch_start_slot, current_epoch_start_slot): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, slot, shard) + winning_root, _, participants = get_winning_root_and_participants(state, slot, shard) participating_balance = get_total_balance(state, participants) total_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: @@ -2338,9 +2346,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Check target epoch, source epoch, source root, and source crosslink target_epoch = slot_to_epoch(data.slot) - assert (target_epoch, data.source_epoch, data.source_root, data.source_crosslink) in { - (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, state.current_crosslinks[data.shard]), - (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, state.previous_crosslinks[data.shard]), + assert (target_epoch, data.source_epoch, data.source_root, data.source_crosslink_root) in { + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, hash_tree_root(state.current_crosslinks[data.shard])), + (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, hash_tree_root(state.previous_crosslinks[data.shard])), } # Check crosslink data root diff --git a/tests/phase0/epoch_processing/test_process_crosslinks.py b/tests/phase0/epoch_processing/test_process_crosslinks.py index a69950f21..f2be142c6 100644 --- a/tests/phase0/epoch_processing/test_process_crosslinks.py +++ b/tests/phase0/epoch_processing/test_process_crosslinks.py @@ -29,8 +29,6 @@ pytestmark = pytest.mark.crosslinks def run_process_crosslinks(state, valid=True): - post_state = deepcopy(state) - # transition state to slot before state transition slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 block = build_empty_block_for_next_slot(state) @@ -40,6 +38,7 @@ def run_process_crosslinks(state, valid=True): # cache state before epoch transition cache_state(state) + post_state = deepcopy(state) process_crosslinks(post_state) return state, post_state @@ -90,3 +89,39 @@ def test_single_crosslink_update_from_previous_epoch(state): assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] return pre_state, post_state + + +def test_double_late_crosslink(state): + next_epoch(state) + state.slot += 4 + + attestation_1 = get_valid_attestation(state) + fill_aggregate_attestation(state, attestation_1) + + # add attestation_1 in the next epoch + next_epoch(state) + add_attestation_to_state(state, attestation_1, state.slot + 1) + + state.slot = attestation_1.data.slot + spec.SLOTS_PER_EPOCH + attestation_2 = get_valid_attestation(state) + fill_aggregate_attestation(state, attestation_2) + + # add attestation_2 in the next epoch after attestation_1 has + # already updated the relevant crosslink + next_epoch(state) + add_attestation_to_state(state, attestation_2, state.slot + 1) + + assert len(state.previous_epoch_attestations) == 1 + assert len(state.current_epoch_attestations) == 0 + + pre_state, post_state = run_process_crosslinks(state) + + shard_1 = attestation_1.data.shard + shard_2 = attestation_2.data.shard + assert shard_1 == shard_2 + shard = shard_1 + + # ensure that the current crosslinks were not updated by the second attestation + 
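+    # (attestation_2's previous_crosslink_root was computed against the pre-update crosslink,
+    # so it no longer equals hash_tree_root(state.current_crosslinks[shard]) and fails the
+    # chain check in process_crosslinks)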
assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard] + + return pre_state, post_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index afb03157a..1641a5290 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -34,6 +34,7 @@ from build.phase0.spec import ( get_empty_block, get_epoch_start_slot, get_genesis_beacon_state, + hash_tree_root, slot_to_epoch, verify_merkle_branch, hash, @@ -172,7 +173,7 @@ def build_attestation_data(state, slot, shard): source_root=justified_block_root, target_root=epoch_boundary_root, crosslink_data_root=spec.ZERO_HASH, - source_crosslink=deepcopy(crosslinks[shard]), + source_crosslink_root=hash_tree_root(crosslinks[shard]), ) From fea40da6fe6dafabb5f38836ac7bd7c3d961abfb Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 18:28:32 +1000 Subject: [PATCH 207/481] update generators readme example --- test_generators/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test_generators/README.md b/test_generators/README.md index c7c77c961..bd509904c 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -98,32 +98,32 @@ def generate_example_test_cases(): yield example_test_case(i) -def example_minimal_suite(configs_path: str) -> gen_typing.TestSuite: +def example_minimal_suite(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, 'minimal') spec.apply_constants_preset(presets) - return gen_suite.render_suite( + return ("mini", "core", gen_suite.render_suite( title="example_minimal", summary="Minimal example suite, testing bar.", forks_timeline="testing", forks=["phase0"], config="minimal", handler="main", - test_cases=generate_example_test_cases()) + test_cases=generate_example_test_cases())) -def example_mainnet_suite(configs_path: str) -> gen_typing.TestSuite: +def example_mainnet_suite(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, 'mainnet') spec.apply_constants_preset(presets) - return gen_suite.render_suite( + return ("full", "core", gen_suite.render_suite( title="example_main_net", summary="Main net based example suite.", forks_timeline= "mainnet", forks=["phase0"], config="testing", handler="main", - test_cases=generate_example_test_cases()) + test_cases=generate_example_test_cases())) if __name__ == "__main__": From ffccf742bd7429d3e421f66a266f570ae643bfec Mon Sep 17 00:00:00 2001 From: Chih Cheng Liang Date: Mon, 8 Apr 2019 09:51:13 +0800 Subject: [PATCH 208/481] replace signed_root with signing_root --- specs/core/0_beacon-chain.md | 22 ++++++++++----------- specs/core/1_custody-game.md | 2 +- specs/core/1_shard-data-chains.md | 10 +++++----- specs/simple-serialize.md | 4 ++-- specs/validator/0_beacon-chain-validator.md | 10 +++++----- tests/phase0/helpers.py | 12 +++++------ tests/phase0/test_sanity.py | 6 +++--- utils/phase0/minimal_ssz.py | 2 +- 8 files changed, 34 insertions(+), 34 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1d68eae52..000d716cd 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -51,7 +51,7 @@ - [`xor`](#xor) - [`hash`](#hash) - [`hash_tree_root`](#hash_tree_root) - - [`signed_root`](#signed_root) + - [`signing_root`](#signing_root) - [`get_temporary_block_header`](#get_temporary_block_header) - [`slot_to_epoch`](#slot_to_epoch) - [`get_previous_epoch`](#get_previous_epoch) @@ -664,9 +664,9 @@ Note: We aim to migrate to a S[T/N]ARK-friendly 
hash function in a future Ethere `def hash_tree_root(object: SSZSerializable) -> Bytes32` is a function for hashing objects into a single root utilizing a hash tree structure. `hash_tree_root` is defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#tree-hash). -### `signed_root` +### `signing_root` -`def signed_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots) to compute signed messages. +`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots) to compute signed messages. ### `get_temporary_block_header` @@ -680,7 +680,7 @@ def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: previous_block_root=block.previous_block_root, state_root=ZERO_HASH, block_body_root=hash_tree_root(block.body), - # signed_root(block) is used for block id purposes so signature is a stub + # signing_root(block) is used for block id purposes so signature is a stub signature=EMPTY_SIGNATURE, ) ``` @@ -1327,7 +1327,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Verify the proof of possession proof_is_valid = bls_verify( pubkey=pubkey, - message_hash=signed_root(deposit.data), + message_hash=signing_root(deposit.data), signature=deposit.data.proof_of_possession, domain=get_domain( state.fork, @@ -1707,7 +1707,7 @@ def cache_state(state: BeaconState) -> None: state.latest_block_header.state_root = previous_slot_state_root # store latest known block for previous slot - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signed_root(state.latest_block_header) + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signing_root(state.latest_block_header) ``` ### Per-epoch processing @@ -2197,7 +2197,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the slots match assert block.slot == state.slot # Verify that the parent matches - assert block.previous_block_root == signed_root(state.latest_block_header) + assert block.previous_block_root == signing_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) # Verify proposer is not slashed @@ -2206,7 +2206,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify proposer signature assert bls_verify( pubkey=proposer.pubkey, - message_hash=signed_root(block), + message_hash=signing_root(block), signature=block.signature, domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_BEACON_BLOCK) ) @@ -2270,7 +2270,7 @@ def process_proposer_slashing(state: BeaconState, for header in (proposer_slashing.header_1, proposer_slashing.header_2): assert bls_verify( pubkey=proposer.pubkey, - message_hash=signed_root(header), + message_hash=signing_root(header), signature=header.signature, domain=get_domain(state.fork, slot_to_epoch(header.slot), DOMAIN_BEACON_BLOCK) ) @@ -2396,7 +2396,7 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: # Verify signature assert bls_verify( pubkey=validator.pubkey, - message_hash=signed_root(exit), + message_hash=signing_root(exit), signature=exit.signature, domain=get_domain(state.fork, exit.epoch, DOMAIN_VOLUNTARY_EXIT) ) @@ -2441,7 +2441,7 @@ def 
process_transfer(state: BeaconState, transfer: Transfer) -> None: # Verify that the signature is valid assert bls_verify( pubkey=transfer.pubkey, - message_hash=signed_root(transfer), + message_hash=signing_root(transfer), signature=transfer.signature, domain=get_domain(state.fork, slot_to_epoch(transfer.slot), DOMAIN_TRANSFER) ) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index e28536d34..6399a13c9 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -348,7 +348,7 @@ def process_bit_challenge(state: BeaconState, challenger = state.validator_registry[challenge.challenger_index] assert bls_verify( pubkey=challenger.pubkey, - message_hash=signed_root(challenge), + message_hash=signing_root(challenge), signature=challenge.signature, domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_BIT_CHALLENGE), ) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 8f2d12a91..e7119b7e6 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -287,7 +287,7 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], # Check beacon block beacon_block = beacon_blocks[block.slot] - assert block.beacon_block_root == signed_root(beacon_block) + assert block.beacon_block_root == signing_root(beacon_block) assert beacon_block.slot <= block.slot: # Check state root @@ -299,12 +299,12 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], else: parent_block = next( block for block in valid_shard_blocks if - signed_root(block) == candidate.previous_block_root + signing_root(block) == candidate.previous_block_root , None) assert parent_block != None assert parent_block.shard == block.shard assert parent_block.slot < block.slot - assert signed_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root + assert signing_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root # Check attestations assert len(block.attestations) <= MAX_SHARD_ATTESTIONS @@ -319,7 +319,7 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], assert proposer_index is not None assert bls_verify( pubkey=validators[proposer_index].pubkey, - message_hash=signed_root(block), + message_hash=signing_root(block), signature=block.signature, domain=get_domain(beacon_state, slot_to_epoch(block.slot), DOMAIN_SHARD_PROPOSER) ) @@ -342,7 +342,7 @@ def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock], # Check shard block shard_block = next( block for block in valid_shard_blocks if - signed_root(block) == candidate.attestation.data.shard_block_root + signing_root(block) == candidate.attestation.data.shard_block_root , None) assert shard_block != None assert shard_block.slot == attestation.data.slot diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 378a1a7cb..b78eff93e 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -54,7 +54,7 @@ For convenience we alias: We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`. -*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type. +*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, etc.) objects implicitly carry their type. 
### `"uintN"` @@ -108,7 +108,7 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi ## Self-signed containers -Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signed_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`. +Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`. ## Implementations diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 0d6033acd..7b03a910a 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -101,7 +101,7 @@ In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW To submit a deposit: * Pack the validator's [initialization parameters](#initialization) into `deposit_input`, a [`DepositInput`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositinput) SSZ object. -* Let `proof_of_possession` be the result of `bls_sign` of the `signed_root(deposit_input)` with `domain=DOMAIN_DEPOSIT`. +* Let `proof_of_possession` be the result of `bls_sign` of the `signing_root(deposit_input)` with `domain=DOMAIN_DEPOSIT`. * Set `deposit_input.proof_of_possession = proof_of_possession`. * Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`. * Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit` along with `serialize(deposit_input)` as the singular `bytes` input along with a deposit `amount` in Gwei. @@ -152,7 +152,7 @@ _Note:_ there might be "skipped" slots between the `parent` and `block`. These s ##### Parent root -Set `block.previous_block_root = signed_root(parent)`. +Set `block.previous_block_root = signing_root(parent)`. ##### State root @@ -199,7 +199,7 @@ Set `block.signature = block_signature` where `block_signature` is defined as: ```python block_signature = bls_sign( privkey=validator.privkey, # privkey store locally, not in state - message_hash=signed_root(block), + message_hash=signing_root(block), domain=get_domain( fork=fork, # `fork` is the fork object at the slot `block.slot` epoch=slot_to_epoch(block.slot), @@ -255,11 +255,11 @@ Set `attestation_data.shard = shard` where `shard` is the shard associated with ##### Beacon block root -Set `attestation_data.beacon_block_root = signed_root(head_block)`. +Set `attestation_data.beacon_block_root = signing_root(head_block)`. ##### Target root -Set `attestation_data.target_root = signed_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. +Set `attestation_data.target_root = signing_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. _Note:_ This can be looked up in the state using: * Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. 
diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 33f394def..61f02ea8c 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -3,7 +3,7 @@ from copy import deepcopy from py_ecc import bls import build.phase0.spec as spec -from build.phase0.utils.minimal_ssz import signed_root +from build.phase0.utils.minimal_ssz import signing_root from build.phase0.spec import ( # constants EMPTY_SIGNATURE, @@ -110,7 +110,7 @@ def build_empty_block_for_next_slot(state): previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == spec.ZERO_HASH: previous_block_header.state_root = state.hash_tree_root() - empty_block.previous_block_root = signed_root(previous_block_header) + empty_block.previous_block_root = signing_root(previous_block_header) return empty_block @@ -123,7 +123,7 @@ def build_deposit_data(state, pubkey, privkey, amount): proof_of_possession=EMPTY_SIGNATURE, ) proof_of_possession = bls.sign( - message_hash=signed_root(deposit_data), + message_hash=signing_root(deposit_data), privkey=privkey, domain=get_domain( state.fork, @@ -170,7 +170,7 @@ def build_voluntary_exit(state, epoch, validator_index, privkey): signature=EMPTY_SIGNATURE, ) voluntary_exit.signature = bls.sign( - message_hash=signed_root(voluntary_exit), + message_hash=signing_root(voluntary_exit), privkey=privkey, domain=get_domain( fork=state.fork, @@ -229,12 +229,12 @@ def get_valid_proposer_slashing(state): domain_type=spec.DOMAIN_BEACON_BLOCK, ) header_1.signature = bls.sign( - message_hash=signed_root(header_1), + message_hash=signing_root(header_1), privkey=privkey, domain=domain, ) header_2.signature = bls.sign( - message_hash=signed_root(header_2), + message_hash=signing_root(header_2), privkey=privkey, domain=domain, ) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 90825242f..0930bad07 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -5,7 +5,7 @@ import pytest from py_ecc import bls import build.phase0.spec as spec -from build.phase0.utils.minimal_ssz import signed_root +from build.phase0.utils.minimal_ssz import signing_root from build.phase0.spec import ( # constants EMPTY_SIGNATURE, @@ -300,7 +300,7 @@ def test_voluntary_exit(state): signature=EMPTY_SIGNATURE, ) voluntary_exit.signature = bls.sign( - message_hash=signed_root(voluntary_exit), + message_hash=signing_root(voluntary_exit), privkey=privkeys[validator_index], domain=get_domain( fork=pre_state.fork, @@ -387,7 +387,7 @@ def test_transfer(state): signature=EMPTY_SIGNATURE, ) transfer.signature = bls.sign( - message_hash=signed_root(transfer), + message_hash=signing_root(transfer), privkey=transfer_privkey, domain=get_domain( fork=pre_state.fork, diff --git a/utils/phase0/minimal_ssz.py b/utils/phase0/minimal_ssz.py index c4828d08f..902ed8460 100644 --- a/utils/phase0/minimal_ssz.py +++ b/utils/phase0/minimal_ssz.py @@ -205,7 +205,7 @@ def truncate(container): return truncated_class(**kwargs) -def signed_root(container): +def signing_root(container): return hash_tree_root(truncate(container)) From e246c3fb049816f90b81dcedf88c928194350fad Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 8 Apr 2019 12:59:42 +1000 Subject: [PATCH 209/481] source_crosslink_root to previous_crosslink_root --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index dd003f83e..c03ce93ad 100644 --- a/specs/core/0_beacon-chain.md +++ 
b/specs/core/0_beacon-chain.md @@ -360,7 +360,7 @@ The types are defined topologically to aid in facilitating an executable version # Crosslink vote 'shard': 'uint64', - 'source_crosslink_root': 'bytes32', + 'previous_crosslink_root': 'bytes32', 'crosslink_data_root': 'bytes32', } ``` @@ -1777,7 +1777,7 @@ def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Sha attestations = state.current_epoch_attestations if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_epoch_attestations valid_attestations = [a for a in attestations if a.data.shard == shard] - all_roots = [(a.data.crosslink_data_root, a.data.source_crosslink_root) for a in valid_attestations] + all_roots = [(a.data.crosslink_data_root, a.data.previous_crosslink_root) for a in valid_attestations] # handle when no attestations for shard available if len(all_roots) == 0: @@ -1995,7 +1995,7 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) for slot in range(previous_epoch_start_slot, current_epoch_start_slot): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, _, participants = get_winning_root_and_participants(state, slot, shard) + winning_root, previous_crosslink_root, participants = get_winning_root_and_participants(state, slot, shard) participating_balance = get_total_balance(state, participants) total_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: @@ -2346,7 +2346,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Check target epoch, source epoch, source root, and source crosslink target_epoch = slot_to_epoch(data.slot) - assert (target_epoch, data.source_epoch, data.source_root, data.source_crosslink_root) in { + assert (target_epoch, data.source_epoch, data.source_root, data.previous_crosslink_root) in { (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, hash_tree_root(state.current_crosslinks[data.shard])), (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, hash_tree_root(state.previous_crosslinks[data.shard])), } From 71a28aa1c9c6e1b997b9cebe3b20101f0ddb233b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 8 Apr 2019 13:03:29 +1000 Subject: [PATCH 210/481] fix tests --- tests/phase0/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 1641a5290..48cbdbe19 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -173,7 +173,7 @@ def build_attestation_data(state, slot, shard): source_root=justified_block_root, target_root=epoch_boundary_root, crosslink_data_root=spec.ZERO_HASH, - source_crosslink_root=hash_tree_root(crosslinks[shard]), + previous_crosslink_root=hash_tree_root(crosslinks[shard]), ) From ad07649ec3aa56737191052385aca1c2a482747c Mon Sep 17 00:00:00 2001 From: Piper Merriam Date: Mon, 18 Mar 2019 08:57:02 -0600 Subject: [PATCH 211/481] Update spec to use SOS style offset layout for variable size values. 
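A minimal worked example of the layout this patch describes (illustrative only and not part of the diff; the container shape `{a: uint64, b: bytes, c: uint16}` and the concrete values are invented for the sketch):

```python
# Hand-serialize {a: uint64, b: bytes, c: uint16} = (5, b"xy", 7) under the proposed layout
a = (5).to_bytes(8, "little")   # fixed size: 8 bytes
c = (7).to_bytes(2, "little")   # fixed size: 2 bytes
b = b"xy"                       # variable size
section_1 = a + c                                        # serialized fixed-size elements only
section_2 = (len(section_1) + 4).to_bytes(4, "little")   # one uint32 offset per variable-size element
section_3 = b                                            # serialized variable-size elements only
serialized = section_1 + section_2 + section_3
assert serialized[14:] == b"xy"  # the single offset (10 + 4 = 14) points at the start of b
```

The first offset equals the length of the fixed-size section plus the length of the offsets section, matching the rule spelled out in the diff below.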
--- specs/simple-serialize.md | 61 ++++++++++++++++++++++++++++++--------- 1 file changed, 47 insertions(+), 14 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 378a1a7cb..ef442042b 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -33,6 +33,8 @@ This is a **work in progress** describing typing, serialization and Merkleizatio ### Composite types +Composite types are limited to a maximum of `2**32 - 1` elements. + * **container**: ordered heterogenous collection of values * key-pair curly bracket notation `{}`, e.g. `{"foo": "uint64", "bar": "bool"}` * **vector**: ordered fixed-length homogeneous collection of values @@ -54,43 +56,74 @@ For convenience we alias: We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`. -*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type. +> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type. -### `"uintN"` +### Basic Types + +For basic types the `serialize` function is defined as follows. + +#### `"uintN"` ```python assert N in [8, 16, 32, 64, 128, 256] return value.to_bytes(N // 8, "little") ``` -### `"bool"` +#### `"bool"` ```python assert value in (True, False) return b"\x01" if value is True else b"\x00" ``` -### Vectors, containers, lists +### Composite Types (Vectors, Containers and Lists) + +The serialized representation of composite types is comprised of three binary segments. + +* The first segment contains the concatenation of the serialized representation of **only** the *fixed size* types: + - This section is empty in the case of a purely *variable-size* type. +* The second segment contains the concatenation of the `uint32` serialized offsets where the serialized representation of the *variable sized* types can be found in the third section. + - This section is empty in the case of a purely *fixed size* type. + - The first offset will always be the length of the serialized *fixed size* elements + the length of all of the serialized offsets. + - Subsequent offsets are the first offset + the combined lengths of the serialized representations for all of the previous *variable size* elements. +* The third segment contains the concatenation of the serialized representations of **only** the *variable size* types. + - This section is empty in the case of a purely *fixed size* type. + + +#### `"vector"`, `"container"` and `"list"` + +For conmposite types the `serialize` function is defined as follows. 
-If `value` is fixed-size: ```python -return "".join([serialize(element) for element in value]) +# section 1 +section_1 = ''.join([serialize(element) for element in value if is_fixed_size(element)]) + +# section 3 +section_3_parts = [serialize(element) for element in value if is_variable_size(element)] +section_3 ''.join(section_3_parts) + +# section 2 +section_1_length = len(section_1) +section_2_length = 4 * len(section_3_parts) +section_3_base_offset = section_1_length + section_2_length +section_3_lengths = [len(part) for part in section_3_parts] +section_3_offsets = [ + (section_3_base_offset + sum(section_3_lengths[:index])) + for index in range(len(section_3_parts)) +] +assert all(offset < 2**32 for offset in section_3_offsets) +section_2 = ''.join([serialize(offset) for offset in section_3_offsets]) + +return ''.join([section_1, section_2, section_3]) ``` -If `value` is variable-size: - -```python -serialized_bytes = "".join([serialize(element) for element in value]) -assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) -serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, "little") -return serialized_length + serialized_bytes -``` ## Deserialization Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations). + ## Merkleization We first define helper functions: From 4a0459a087ecd3cef8ba819a82bcb2c86188ca76 Mon Sep 17 00:00:00 2001 From: Piper Merriam Date: Mon, 18 Mar 2019 10:20:58 -0600 Subject: [PATCH 212/481] PR feedback --- specs/simple-serialize.md | 55 ++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index ef442042b..b695956d2 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -12,7 +12,7 @@ This is a **work in progress** describing typing, serialization and Merkleizatio - [Serialization](#serialization) - [`"uintN"`](#uintn) - [`"bool"`](#bool) - - [Vectors, containers, lists](#vectors-containers-lists) + - [Vectors, containers, lists](#composite-types-vectors-containers-and-lists) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Self-signed containers](#self-signed-containers) @@ -33,8 +33,6 @@ This is a **work in progress** describing typing, serialization and Merkleizatio ### Composite types -Composite types are limited to a maximum of `2**32 - 1` elements. - * **container**: ordered heterogenous collection of values * key-pair curly bracket notation `{}`, e.g. `{"foo": "uint64", "bar": "bool"}` * **vector**: ordered fixed-length homogeneous collection of values @@ -78,15 +76,12 @@ return b"\x01" if value is True else b"\x00" ### Composite Types (Vectors, Containers and Lists) -The serialized representation of composite types is comprised of three binary segments. +The serialized representation of composite types is comprised of two binary segments. -* The first segment contains the concatenation of the serialized representation of **only** the *fixed size* types: - - This section is empty in the case of a purely *variable-size* type. -* The second segment contains the concatenation of the `uint32` serialized offsets where the serialized representation of the *variable sized* types can be found in the third section. 
- - This section is empty in the case of a purely *fixed size* type. - - The first offset will always be the length of the serialized *fixed size* elements + the length of all of the serialized offsets. - - Subsequent offsets are the first offset + the combined lengths of the serialized representations for all of the previous *variable size* elements. -* The third segment contains the concatenation of the serialized representations of **only** the *variable size* types. +* The first segment is *fixed size* for all types, containing the concatenation of *either* + - The serialized representation of value for each of the *fixed size* types + - The `"uint32"` serialized offset where the serialized representation of the *variable sized* type is located in the second section. +* The second segment contains the concatenation of the serialized representations of **only** the *variable size* types. - This section is empty in the case of a purely *fixed size* type. @@ -94,28 +89,30 @@ The serialized representation of composite types is comprised of three binary se For conmposite types the `serialize` function is defined as follows. +> *Note*: The `collate` function combines the serialized *fixed size* values +> and the serialized offsets into a single array correctly ordered with respect +> to the original element types. The implementation of this logic is not +> included in this example for simplicity. + ```python -# section 1 -section_1 = ''.join([serialize(element) for element in value if is_fixed_size(element)]) - -# section 3 -section_3_parts = [serialize(element) for element in value if is_variable_size(element)] -section_3 ''.join(section_3_parts) - # section 2 -section_1_length = len(section_1) -section_2_length = 4 * len(section_3_parts) -section_3_base_offset = section_1_length + section_2_length -section_3_lengths = [len(part) for part in section_3_parts] -section_3_offsets = [ - (section_3_base_offset + sum(section_3_lengths[:index])) - for index in range(len(section_3_parts)) -] -assert all(offset < 2**32 for offset in section_3_offsets) -section_2 = ''.join([serialize(offset) for offset in section_3_offsets]) +section_2_parts = [serialize(element) for element in value if is_variable_size(element)] +section_2_lengths = [len(part) for part in section_2_parts] +section_2 ''.join(section_2_parts) -return ''.join([section_1, section_2, section_3]) +# section 1 +section_1_fixed_parts = [serialize(element) for element in value if is_fixed_size(element)] +section_1_length = sum(len(part) for part in section_1_fixed_parts) + 4 * len(section_2_parts) +section_1_offsets = [ + section_1_length + sum(section_2_lengths[:index]) + for index in range(len(section_2_parts)) +] +assert all(offset < 2**32 for offset in section_1_offsets) +section_1_parts = collate(section_1_fixed_parts, section_1_offsets) +section_1 = ''.join(section_1_parts) + +return ''.join([section_1, section_2]) ``` From 605028bbda38b7af7191cad1018ba121f240cbd7 Mon Sep 17 00:00:00 2001 From: Piper Merriam Date: Mon, 18 Mar 2019 16:28:33 -0600 Subject: [PATCH 213/481] more precise definitions for and and expand code example for how sections are created --- specs/simple-serialize.md | 42 +++++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index b695956d2..b85a20656 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -62,6 +62,8 @@ For basic types the `serialize` function is defined as follows. 
#### `"uintN"` +A byte string of width `N // 8` containing the little-endian encode integer. + ```python assert N in [8, 16, 32, 64, 128, 256] return value.to_bytes(N // 8, "little") @@ -69,6 +71,9 @@ return value.to_bytes(N // 8, "little") #### `"bool"` +* The byte `\x00` **if** the value is `False` +* The byte `\x01` **if** the value is `True` + ```python assert value in (True, False) return b"\x01" if value is True else b"\x00" @@ -87,29 +92,46 @@ The serialized representation of composite types is comprised of two binary segm #### `"vector"`, `"container"` and `"list"` -For conmposite types the `serialize` function is defined as follows. - -> *Note*: The `collate` function combines the serialized *fixed size* values -> and the serialized offsets into a single array correctly ordered with respect -> to the original element types. The implementation of this logic is not -> included in this example for simplicity. - +An implementation of the `serialize` function for `"Vector"`, `"Container"` and +`"List"` types would take the following form. ```python -# section 2 +# The second section is just the concatenation of the serialized *variable size* elements section_2_parts = [serialize(element) for element in value if is_variable_size(element)] section_2_lengths = [len(part) for part in section_2_parts] section_2 ''.join(section_2_parts) -# section 1 +# Serialize the *fixed size* elements section_1_fixed_parts = [serialize(element) for element in value if is_fixed_size(element)] + +# Compute the length of the first section section_1_length = sum(len(part) for part in section_1_fixed_parts) + 4 * len(section_2_parts) + +# Compute the offset values for each part of the second section section_1_offsets = [ section_1_length + sum(section_2_lengths[:index]) for index in range(len(section_2_parts)) ] assert all(offset < 2**32 for offset in section_1_offsets) -section_1_parts = collate(section_1_fixed_parts, section_1_offsets) + +# compute the appropriate indices for *fixed size* elements for the first section +fixed_size_element_indices = [index for index, element in enumerate(value) if is_fixed_size(element)] + +# compute the appropriate indices for the offsets of the *variable size* elements +variable_size_element_indices = [index for index, element in enumerate(value) if is_variable_size(element)] + +# create a list with placeholders for all values +section_1_parts = [None] * len(value) + +# populate all of the serialized *fixed size* elements +for index, data in zip(fixed_size_element_indices, section_1_fixed_parts): + section_1_parts[index] = data + +# populate all of the serialized offsets for the *variable size* elements +for index, offset in zip(variable_size_element_indices, section_1_offsets): + section_1_parts[index] = serialize(offset) + +assert not any(part is None for part in section_1_parts) section_1 = ''.join(section_1_parts) return ''.join([section_1, section_2]) From fa66640a00aed4fae5f8878d00ff02b6e50b6705 Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 20 Mar 2019 09:01:27 -0600 Subject: [PATCH 214/481] Update specs/simple-serialize.md Co-Authored-By: pipermerriam --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index b85a20656..f2ca7c0cd 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -86,7 +86,7 @@ The serialized representation of composite types is comprised of two binary segm * The first segment is *fixed size* for all types, containing the 
concatenation of *either* - The serialized representation of value for each of the *fixed size* types - The `"uint32"` serialized offset where the serialized representation of the *variable sized* type is located in the second section. -* The second segment contains the concatenation of the serialized representations of **only** the *variable size* types. +* The second section contains the concatenation of the serialized representations of **only** the *variable size* types. - This section is empty in the case of a purely *fixed size* type. From 32684d582a14f59ea3d02490316ae84496a099ea Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 20 Mar 2019 09:01:51 -0600 Subject: [PATCH 215/481] Update specs/simple-serialize.md Co-Authored-By: pipermerriam --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index f2ca7c0cd..8577dcd2b 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -85,7 +85,7 @@ The serialized representation of composite types is comprised of two binary segm * The first segment is *fixed size* for all types, containing the concatenation of *either* - The serialized representation of value for each of the *fixed size* types - - The `"uint32"` serialized offset where the serialized representation of the *variable sized* type is located in the second section. + - The `"uint32"` serialized offset where the serialized representation of the *variable sized* type is located in the second section relative to the beginning of the first section. * The second section contains the concatenation of the serialized representations of **only** the *variable size* types. - This section is empty in the case of a purely *fixed size* type. From 3741b7517b2facfd4beb50a8fd8f1d021d2cb7b1 Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 20 Mar 2019 09:02:08 -0600 Subject: [PATCH 216/481] Update specs/simple-serialize.md Co-Authored-By: pipermerriam --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 8577dcd2b..cf4dd55ba 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -83,7 +83,7 @@ return b"\x01" if value is True else b"\x00" The serialized representation of composite types is comprised of two binary segments. -* The first segment is *fixed size* for all types, containing the concatenation of *either* +* The first section is *fixed size* for all types, containing the concatenation of *either* - The serialized representation of value for each of the *fixed size* types - The `"uint32"` serialized offset where the serialized representation of the *variable sized* type is located in the second section relative to the beginning of the first section. * The second section contains the concatenation of the serialized representations of **only** the *variable size* types. 
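The serialization pseudocode in these patches leans on `is_fixed_size` and `is_variable_size` without defining them. A possible reading, written over the spec's type notation rather than over values (a hypothetical helper assuming the usual SSZ rules: basic types and `bytesN` are fixed size, `bytes` and lists are variable size, vectors and containers are fixed size only if all of their parts are):

```python
def is_fixed_size(typ) -> bool:
    if isinstance(typ, str):
        # "uintN", "bool" and "bytesN" are fixed size; the "bytes" alias is variable size
        return typ == "bool" or typ.startswith("uint") or (typ.startswith("bytes") and typ != "bytes")
    if isinstance(typ, list) and len(typ) == 2:
        # vector notation [elem_type, N]: fixed size iff its element type is
        return is_fixed_size(typ[0])
    if isinstance(typ, dict):
        # container notation {field_name: field_type}: fixed size iff every field type is
        return all(is_fixed_size(t) for t in typ.values())
    # list notation [elem_type] (and anything else unrecognized) is treated as variable size
    return False


def is_variable_size(typ) -> bool:
    return not is_fixed_size(typ)
```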
From 1ab501975cc9f615fec6d4770b60319b68672c52 Mon Sep 17 00:00:00 2001 From: jannikluhn Date: Wed, 20 Mar 2019 09:02:20 -0600 Subject: [PATCH 217/481] Update specs/simple-serialize.md Co-Authored-By: pipermerriam --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index cf4dd55ba..9d30f4f3d 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -84,7 +84,7 @@ return b"\x01" if value is True else b"\x00" The serialized representation of composite types is comprised of two binary segments. * The first section is *fixed size* for all types, containing the concatenation of *either* - - The serialized representation of value for each of the *fixed size* types + - The serialized representation for each of the *fixed size* elements of value - The `"uint32"` serialized offset where the serialized representation of the *variable sized* type is located in the second section relative to the beginning of the first section. * The second section contains the concatenation of the serialized representations of **only** the *variable size* types. - This section is empty in the case of a purely *fixed size* type. From ca98d752d2586968aee22cd73402dc5cff3d2b38 Mon Sep 17 00:00:00 2001 From: Piper Merriam Date: Wed, 20 Mar 2019 09:04:55 -0600 Subject: [PATCH 218/481] d --- specs/simple-serialize.md | 52 +++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 9d30f4f3d..1669c00ea 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -81,7 +81,7 @@ return b"\x01" if value is True else b"\x00" ### Composite Types (Vectors, Containers and Lists) -The serialized representation of composite types is comprised of two binary segments. +The serialized representation of composite types is comprised of two binary sections. 
* The first section is *fixed size* for all types, containing the concatenation of *either* - The serialized representation for each of the *fixed size* elements of value @@ -97,41 +97,35 @@ An implementation of the `serialize` function for `"Vector"`, `"Container"` and ```python # The second section is just the concatenation of the serialized *variable size* elements -section_2_parts = [serialize(element) for element in value if is_variable_size(element)] +section_2_parts = [ + serialize(element) if is_variable_size(element) + else '' + for element in value +] section_2_lengths = [len(part) for part in section_2_parts] -section_2 ''.join(section_2_parts) +section_2 = ''.join(section_2_parts) -# Serialize the *fixed size* elements -section_1_fixed_parts = [serialize(element) for element in value if is_fixed_size(element)] - -# Compute the length of the first section -section_1_length = sum(len(part) for part in section_1_fixed_parts) + 4 * len(section_2_parts) +# Compute the length of the first section (can also be extracted from the type directly) +section_1_length = sum( + len(serialize(element)) if is_fixed_size(element) + else 4 + for element in value +) # Compute the offset values for each part of the second section section_1_offsets = [ - section_1_length + sum(section_2_lengths[:index]) - for index in range(len(section_2_parts)) + section_1_length + sum(section_2_lengths[:element_index]) if is_variable_size(element) + else None + for element_index, element in enumerate(value) ] -assert all(offset < 2**32 for offset in section_1_offsets) +assert all(offset is None or offset < 2**32 for offset in section_1_offsets) -# compute the appropriate indices for *fixed size* elements for the first section -fixed_size_element_indices = [index for index, element in enumerate(value) if is_fixed_size(element)] - -# compute the appropriate indices for the offsets of the *variable size* elements -variable_size_element_indices = [index for index, element in enumerate(value) if is_variable_size(element)] - -# create a list with placeholders for all values -section_1_parts = [None] * len(value) - -# populate all of the serialized *fixed size* elements -for index, data in zip(fixed_size_element_indices, section_1_fixed_parts): - section_1_parts[index] = data - -# populate all of the serialized offsets for the *variable size* elements -for index, offset in zip(variable_size_element_indices, section_1_offsets): - section_1_parts[index] = serialize(offset) - -assert not any(part is None for part in section_1_parts) +# The first section is the concatenation of the serialized static size elements and offsets +section_1_parts = [ + serialize(element) if is_fixed_size(element) + else serialize(section_1_offsets[element_index]) + for element_index, element in enumerate(value) +] section_1 = ''.join(section_1_parts) return ''.join([section_1, section_2]) From 5f465842a4173bb50693d67cdae69ed5c2cfc5ce Mon Sep 17 00:00:00 2001 From: Piper Merriam Date: Wed, 20 Mar 2019 09:12:49 -0600 Subject: [PATCH 219/481] more language updates --- specs/simple-serialize.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 1669c00ea..4fcefb86f 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -54,7 +54,7 @@ For convenience we alias: We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`. 
-> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type. +> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, `is_fixed_size`, `is_variable_size` etc.) objects implicitly carry their type. ### Basic Types @@ -83,17 +83,19 @@ return b"\x01" if value is True else b"\x00" The serialized representation of composite types is comprised of two binary sections. -* The first section is *fixed size* for all types, containing the concatenation of *either* - - The serialized representation for each of the *fixed size* elements of value - - The `"uint32"` serialized offset where the serialized representation of the *variable sized* type is located in the second section relative to the beginning of the first section. +* The first section is *fixed size* for all types, containing the concatenation of + - The serialized representation for each of the *fixed size* elements from the value + - The `"uint32"` serialized offset where the serialized representation of the *variable sized* elements from the value are located in the second section. * The second section contains the concatenation of the serialized representations of **only** the *variable size* types. - This section is empty in the case of a purely *fixed size* type. +> **NOTE**: Offsets are relative to the beginning of the beginning of the entire serialized representation (the start of the first section) + #### `"vector"`, `"container"` and `"list"` -An implementation of the `serialize` function for `"Vector"`, `"Container"` and -`"List"` types would take the following form. +Below is an illustrative implementation of the `serialize` function for `"Vector"`, +`"Container"` and `"List"` types. ```python # The second section is just the concatenation of the serialized *variable size* elements From 66173b8ba3869b9c130627a3d45e1d9199a8dfe5 Mon Sep 17 00:00:00 2001 From: Piper Merriam Date: Wed, 20 Mar 2019 09:14:02 -0600 Subject: [PATCH 220/481] static > fixed --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 4fcefb86f..52bd52587 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -122,7 +122,7 @@ section_1_offsets = [ ] assert all(offset is None or offset < 2**32 for offset in section_1_offsets) -# The first section is the concatenation of the serialized static size elements and offsets +# The first section is the concatenation of the serialized fixed size elements and offsets section_1_parts = [ serialize(element) if is_fixed_size(element) else serialize(section_1_offsets[element_index]) From 92f002c501dac0ff6bd0da8122dc99322450e33e Mon Sep 17 00:00:00 2001 From: Piper Merriam Date: Thu, 28 Mar 2019 08:26:18 -0600 Subject: [PATCH 221/481] specify offsets better --- specs/simple-serialize.md | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 52bd52587..6bd9e3dd7 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -83,13 +83,21 @@ return b"\x01" if value is True else b"\x00" The serialized representation of composite types is comprised of two binary sections. 
-* The first section is *fixed size* for all types, containing the concatenation of - - The serialized representation for each of the *fixed size* elements from the value - - The `"uint32"` serialized offset where the serialized representation of the *variable sized* elements from the value are located in the second section. -* The second section contains the concatenation of the serialized representations of **only** the *variable size* types. +* The first section is the concatenation of a mixture of either the serialized representation for *fixed size* elements **or** a serialized offset value for *variable size* elements. + - All *fixed size* elements are represented in this section as their serialized representation. + - All *variable size* elements are represented in this section with a `"uint32"` serialized offset where the serialized representation of the element is located in the second section. + - offsets are relative to the beginning of the beginning of the entire serialized representation (the start of the first section) +* The second section is the concatenation of the serialized representations of **only** the *variable size* types. - This section is empty in the case of a purely *fixed size* type. -> **NOTE**: Offsets are relative to the beginning of the beginning of the entire serialized representation (the start of the first section) + +Offset values are subject to the following validity rules: + +- For Vector and Container types: + - The first offset **must** be equal to the length of the first section. +- For all types: + - Offsets **MAY NOT** be less than any previous offset. + - Offsets **MUST** be less than `2**32` #### `"vector"`, `"container"` and `"list"` From 7a1b38a6ad6569c13fac33b11234b05b48c23ab4 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 9 Apr 2019 05:52:32 -0500 Subject: [PATCH 222/481] Homogenised start shard Alternative presentation to #884, should be substantively equivalent --- specs/core/0_beacon-chain.md | 39 ++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1d68eae52..0a4d55763 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -67,8 +67,8 @@ - [`get_permuted_index`](#get_permuted_index) - [`get_split_offset`](#get_split_offset) - [`get_epoch_committee_count`](#get_epoch_committee_count) + - [`get_shard_delta`](#get_shard_delta) - [`compute_committee`](#compute_committee) - - [`get_current_epoch_committee_count`](#get_current_epoch_committee_count) - [`get_crosslink_committees_at_slot`](#get_crosslink_committees_at_slot) - [`get_block_root`](#get_block_root) - [`get_state_root`](#get_state_root) @@ -843,19 +843,27 @@ def get_split_offset(list_size: int, chunks: int, index: int) -> int: ### `get_epoch_committee_count` ```python -def get_epoch_committee_count(active_validator_count: int) -> int: +def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: """ Return the number of committees in one epoch. 
""" + active_validators = get_active_validator_indices(state.validator_registry, epoch) return max( 1, min( SHARD_COUNT // SLOTS_PER_EPOCH, - active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, + len(active_validators) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, ) ) * SLOTS_PER_EPOCH ``` +### `get_shard_delta` + +```python +def get_shard_delta(state: BeaconState, epoch: Epoch) -> int: + return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH) +``` + ### `compute_committee` ```python @@ -877,20 +885,6 @@ def compute_committee(validator_indices: List[ValidatorIndex], **Note**: this definition and the next few definitions are highly inefficient as algorithms, as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. -### `get_current_epoch_committee_count` - -```python -def get_current_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the current epoch of the given ``state``. - """ - current_active_validators = get_active_validator_indices( - state.validator_registry, - get_current_epoch(state), - ) - return get_epoch_committee_count(len(current_active_validators)) -``` - ### `get_crosslink_committees_at_slot` ```python @@ -909,16 +903,17 @@ def get_crosslink_committees_at_slot(state: BeaconState, state.validator_registry, epoch, ) - committees_per_epoch = get_epoch_committee_count(len(indices)) if epoch == current_epoch: start_shard = state.latest_start_shard elif epoch == previous_epoch: - start_shard = (state.latest_start_shard - committees_per_epoch) % SHARD_COUNT + previous_shard_delta = get_shard_delta(state, previous_epoch) + start_shard = (state.latest_start_shard - previous_shard_delta) % SHARD_COUNT elif epoch == next_epoch: - current_epoch_committees = get_current_epoch_committee_count(state) - start_shard = (state.latest_start_shard + current_epoch_committees) % SHARD_COUNT + current_shard_delta = get_shard_delta(state, current_epoch) + start_shard = (state.latest_start_shard + current_shard_delta) % SHARD_COUNT + committees_per_epoch = get_epoch_committee_count(state, epoch) committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT @@ -2088,7 +2083,7 @@ def update_registry(state: BeaconState) -> None: update_validator_registry(state) state.latest_start_shard = ( state.latest_start_shard + - get_current_epoch_committee_count(state) + get_epoch_committee_count(state, get_current_epoch(state)) ) % SHARD_COUNT ``` From 9dde3a26612cbe1636529ec0d85f8462c8271f9b Mon Sep 17 00:00:00 2001 From: vbuterin Date: Tue, 9 Apr 2019 05:59:00 -0500 Subject: [PATCH 223/481] Update replace_empty_or_append Requires adding definitions of `empty` and `typeof` to the function puller. 
--- specs/core/1_custody-game.md | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 41ba9d953..88341ae98 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -233,9 +233,9 @@ def epoch_to_custody_period(epoch: Epoch) -> int: ### `replace_empty_or_append` ```python -def replace_empty_or_append(list: List[Any], empty_element: Any, new_element: Any) -> int: +def replace_empty_or_append(list: List[Any], new_element: Any) -> int: for i in range(len(list)): - if list[i] == empty_element: + if list[i] == empty(typeof(new_element)): list[i] = new_element return i list.append(new_element) @@ -343,11 +343,7 @@ def process_chunk_challenge(state: BeaconState, depth=depth, chunk_index=challenge.chunk_index, ) - replace_empty_or_append( - list=state.custody_chunk_challenge_records, - empty_element=CustodyChunkChallengeRecord(), - new_element=new_record - ) + replace_empty_or_append(state.custody_chunk_challenge_records, new_record) state.custody_challenge_index += 1 # Postpone responder withdrawability @@ -413,11 +409,7 @@ def process_bit_challenge(state: BeaconState, chunk_bits=challenge.chunk_bits, responder_key=challenge.responder_key, ) - replace_empty_or_append( - list=state.custody_bit_challenge_records, - empty_element=CustodyBitChallengeRecord(), - new_element=new_record - ) + replace_empty_or_append(state.custody_bit_challenge_records, new_record) state.custody_challenge_index += 1 # Postpone responder withdrawability responder.withdrawable_epoch = FAR_FUTURE_EPOCH From 509354582c5d381b1cad38b211f0c97a182de00f Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 10 Apr 2019 11:14:22 +1000 Subject: [PATCH 224/481] limit bit-length of justification bitfield to strict 64, prevent SSZ encoding crash due to too large integer size --- specs/core/0_beacon-chain.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1d68eae52..c3161c8c0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1819,6 +1819,8 @@ def update_justification_and_finalization(state: BeaconState) -> None: # Rotate the justification bitfield up one epoch to make room for the current epoch state.justification_bitfield <<= 1 + # Python var length integers: justification bitfield is 64 bits, and may not be bigger (for SSZ serialization) + state.justification_bitfield &= (1 << 64) - 1 # If the previous epoch gets justified, fill the second last bit previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: From dbc5778f6462e478f192e1e0ff7194dc003e17f7 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Wed, 10 Apr 2019 01:42:00 -0500 Subject: [PATCH 225/481] Broken link fix (#888) [General test format] currently goes to a broken link (https://github.com/ethereum/eth2.0-specs/blob/dev/specs/test-format.md). The correct link is (https://github.com/ethereum/eth2.0-specs/blob/dev/specs/test_formats/README.md). 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d06e846d4..8ce119f13 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Core specifications for eth2.0 client validation can be found in [specs/core](sp Accompanying documents can be found in [specs](specs) and include * [SimpleSerialize (SSZ) spec](specs/simple-serialize.md) * [BLS signature verification](specs/bls_signature.md) -* [General test format](specs/test-format.md) +* [General test format](specs/test_formats/README.md) * [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md) * [Merkle proof formats](specs/light_client/merkle_proofs.md) * [Light client syncing protocol](specs/light_client/sync_protocol.md) From e2dc12e9f6df933e529fadfb81821737a834ee98 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 10 Apr 2019 16:24:05 +0800 Subject: [PATCH 226/481] Update specs/core/0_beacon-chain.md Co-Authored-By: ChihChengLiang --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 000d716cd..185b07443 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -666,7 +666,7 @@ Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethere ### `signing_root` -`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots) to compute signed messages. +`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) to compute signing messages. 
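
For intuition, here is a deliberately simplified toy (not the SSZ algorithm itself, and the field names are made up) of what computing a signing message for a self-signed container means: every field except the trailing `signature` is committed to, so the value being signed is independent of the signature that will later be placed in that field.

```python
from hashlib import sha256

# Toy illustration only: fold all fields except the final `signature` into a digest.
def toy_signing_root(container: dict) -> bytes:
    assert list(container)[-1] == 'signature'  # self-signed containers end with their signature
    root = b'\x00' * 32
    for name, value in list(container.items())[:-1]:  # skip the signature field
        leaf = sha256(name.encode() + repr(value).encode()).digest()
        root = sha256(root + leaf).digest()
    return root

header = {'slot': 42, 'parent_root': b'\x11' * 32, 'signature': b'\x00' * 96}
# Changing the signature does not change the signing root.
assert toy_signing_root(header) == toy_signing_root({**header, 'signature': b'\x22' * 96})
```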
### `get_temporary_block_header` From db91c7fe9ed15b10de93f8ba2ea9157e0c37f508 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 7 Apr 2019 23:36:05 +1000 Subject: [PATCH 227/481] Work for new pyspec based test generators --- configs/constant_presets/mainnet.yaml | 2 +- configs/constant_presets/minimal.yaml | 6 +-- scripts/phase0/build_spec.py | 7 ++- scripts/phase0/function_puller.py | 26 +++++++--- .../config_helpers/preset_loader/loader.py | 11 +++- test_libs/gen_helpers/gen_base/gen_runner.py | 4 +- test_libs/pyspec/eth2spec/debug/decode.py | 28 ++++++++++ test_libs/pyspec/eth2spec/debug/encode.py | 26 ++++++++++ test_libs/pyspec/eth2spec/debug/jsonize.py | 52 ------------------- .../eth2spec/phase0/state_transition.py | 16 +++--- 10 files changed, 105 insertions(+), 73 deletions(-) create mode 100644 test_libs/pyspec/eth2spec/debug/decode.py create mode 100644 test_libs/pyspec/eth2spec/debug/encode.py delete mode 100644 test_libs/pyspec/eth2spec/debug/jsonize.py diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml index 6eef9ad81..27085d40a 100644 --- a/configs/constant_presets/mainnet.yaml +++ b/configs/constant_presets/mainnet.yaml @@ -22,7 +22,7 @@ SHUFFLE_ROUND_COUNT: 90 # Deposit contract # --------------------------------------------------------------- # **TBD** -DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123567890123456789012357890 +DEPOSIT_CONTRACT_ADDRESS: 0x12345678901235678901234567890123567890 # 2**5 ` (= 32) DEPOSIT_CONTRACT_TREE_DEPTH: 32 diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index e4c869ded..f73531f38 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -22,7 +22,7 @@ SHUFFLE_ROUND_COUNT: 90 # Deposit contract # --------------------------------------------------------------- # **TBD** -DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123567890123456789012357890 +DEPOSIT_CONTRACT_ADDRESS: 0x12345678901235678901234567890123567890 # 2**5 ` (= 32) DEPOSIT_CONTRACT_TREE_DEPTH: 32 @@ -62,8 +62,8 @@ SLOTS_PER_EPOCH: 8 MIN_SEED_LOOKAHEAD: 1 # 2**2 ` (= 4) epochs 25.6 minutes ACTIVATION_EXIT_DELAY: 4 -# 2**4 ` (= 16) epochs ~1.7 hours -EPOCHS_PER_ETH1_VOTING_PERIOD: 16 +# [customized] higher frequency new deposits from eth1 for testing +EPOCHS_PER_ETH1_VOTING_PERIOD: 2 # [customized] smaller state SLOTS_PER_HISTORICAL_ROOT: 64 # 2**8 ` (= 256) epochs ~27 hours diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py index 98b1cad78..54adfdde7 100644 --- a/scripts/phase0/build_spec.py +++ b/scripts/phase0/build_spec.py @@ -83,11 +83,14 @@ def hash(x): # Access to overwrite spec constants based on configuration def apply_constants_preset(preset: Dict[str, Any]): global_vars = globals() - for k, v in preset: + for k, v in preset.items(): global_vars[k] = v # Deal with derived constants - GENESIS_EPOCH = slot_to_epoch(GENESIS_SLOT) + global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT) + + # Initialize SSZ types again, to account for changed lengths + init_SSZ_types() """) with open(outfile, 'w') as out: diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py index d0f3f66f1..fc7f9fb8c 100644 --- a/scripts/phase0/function_puller.py +++ b/scripts/phase0/function_puller.py @@ -6,7 +6,8 @@ def get_spec(file_name) -> List[str]: code_lines = [] pulling_from = None current_name = None - processing_typedef = False + current_typedef = None + type_defs = [] for linenum, line in enumerate(open(sys.argv[1]).readlines()): 
line = line.rstrip() if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`': @@ -18,21 +19,26 @@ def get_spec(file_name) -> List[str]: if pulling_from is None: pulling_from = linenum else: - if processing_typedef: + if current_typedef is not None: assert code_lines[-1] == '}' code_lines[-1] = '})' + current_typedef[-1] = '})' + type_defs.append((current_name, current_typedef)) pulling_from = None - processing_typedef = False + current_typedef = None else: if pulling_from == linenum and line == '{': code_lines.append('%s = SSZType({' % current_name) - processing_typedef = True + current_typedef = ['global_vars["%s"] = SSZType({' % current_name] elif pulling_from is not None: # Add some whitespace between functions if line[:3] == 'def': - code_lines.append("") - code_lines.append("") + code_lines.append('') + code_lines.append('') code_lines.append(line) + # Remember type def lines + if current_typedef is not None: + current_typedef.append(line) elif pulling_from is None and len(line) > 0 and line[0] == '|': row = line[1:].split('|') if len(row) >= 2: @@ -48,4 +54,12 @@ def get_spec(file_name) -> List[str]: eligible = False if eligible: code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123567890123456789012357890'))) + # Build type-def re-initialization + code_lines.append('') + code_lines.append('def init_SSZ_types():') + code_lines.append(' global_vars = globals()') + for ssz_type_name, ssz_type in type_defs: + code_lines.append('') + for type_line in ssz_type: + code_lines.append(' ' + type_line) return code_lines diff --git a/test_libs/config_helpers/preset_loader/loader.py b/test_libs/config_helpers/preset_loader/loader.py index 043c58805..f37aca393 100644 --- a/test_libs/config_helpers/preset_loader/loader.py +++ b/test_libs/config_helpers/preset_loader/loader.py @@ -14,5 +14,12 @@ def load_presets(configs_dir, presets_name) -> Dict[str, Any]: :return: Dictionary, mapping of constant-name -> constant-value """ path = Path(join(configs_dir, 'constant_presets', presets_name+'.yaml')) - yaml = YAML(typ='safe') - return yaml.load(path) + yaml = YAML(typ='base') + loaded = yaml.load(path) + out = dict() + for k, v in loaded.items(): + if v.startswith("0x"): + out[k] = bytes.fromhex(v[2:]) + else: + out[k] = int(v) + return out diff --git a/test_libs/gen_helpers/gen_base/gen_runner.py b/test_libs/gen_helpers/gen_base/gen_runner.py index 8a179b089..e36d48b8b 100644 --- a/test_libs/gen_helpers/gen_base/gen_runner.py +++ b/test_libs/gen_helpers/gen_base/gen_runner.py @@ -90,6 +90,7 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): file_mode = "w" yaml = YAML(pure=True) + yaml.default_flow_style = None print(f"Generating tests for {generator_name}, creating {len(suite_creators)} test suite files...") print(f"Reading config presets and fork timelines from {args.configs_path}") @@ -98,7 +99,8 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): handler_output_dir = Path(output_dir) / Path(handler) try: - handler_output_dir.mkdir() + if not handler_output_dir.exists(): + handler_output_dir.mkdir() except FileNotFoundError as e: sys.exit(f'Error when creating handler dir {handler} for test "{suite["title"]}" ({e})') diff --git a/test_libs/pyspec/eth2spec/debug/decode.py b/test_libs/pyspec/eth2spec/debug/decode.py new file mode 100644 index 000000000..aeac3924d --- /dev/null +++ b/test_libs/pyspec/eth2spec/debug/decode.py @@ -0,0 +1,28 @@ +from eth2spec.utils.minimal_ssz import hash_tree_root + + 
+def decode(json, typ): + if isinstance(typ, str) and typ[:4] == 'uint': + return json + elif typ == 'bool': + assert json in (True, False) + return json + elif isinstance(typ, list): + return [decode(element, typ[0]) for element in json] + elif isinstance(typ, str) and typ[:4] == 'byte': + return bytes.fromhex(json[2:]) + elif hasattr(typ, 'fields'): + temp = {} + for field, subtype in typ.fields.items(): + temp[field] = decode(json[field], subtype) + if field + "_hash_tree_root" in json: + assert(json[field + "_hash_tree_root"][2:] == + hash_tree_root(temp[field], subtype).hex()) + ret = typ(**temp) + if "hash_tree_root" in json: + assert(json["hash_tree_root"][2:] == + hash_tree_root(ret, typ).hex()) + return ret + else: + print(json, typ) + raise Exception("Type not recognized") diff --git a/test_libs/pyspec/eth2spec/debug/encode.py b/test_libs/pyspec/eth2spec/debug/encode.py new file mode 100644 index 000000000..f50bc9d5e --- /dev/null +++ b/test_libs/pyspec/eth2spec/debug/encode.py @@ -0,0 +1,26 @@ +from eth2spec.utils.minimal_ssz import hash_tree_root + + +def encode(value, typ, include_hash_tree_roots=False): + if isinstance(typ, str) and typ[:4] == 'uint': + return value + elif typ == 'bool': + assert value in (True, False) + return value + elif isinstance(typ, list): + return [encode(element, typ[0], include_hash_tree_roots) for element in value] + elif isinstance(typ, str) and typ[:4] == 'byte': + return '0x' + value.hex() + elif hasattr(typ, 'fields'): + ret = {} + for field, subtype in typ.fields.items(): + ret[field] = encode(getattr(value, field), subtype, include_hash_tree_roots) + if include_hash_tree_roots: + ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() + if include_hash_tree_roots: + ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() + return ret + else: + print(value, typ) + raise Exception("Type not recognized") + diff --git a/test_libs/pyspec/eth2spec/debug/jsonize.py b/test_libs/pyspec/eth2spec/debug/jsonize.py deleted file mode 100644 index 3ea6fe3f5..000000000 --- a/test_libs/pyspec/eth2spec/debug/jsonize.py +++ /dev/null @@ -1,52 +0,0 @@ -from eth2spec.utils.minimal_ssz import hash_tree_root - - -def jsonize(value, typ, include_hash_tree_roots=False): - if isinstance(typ, str) and typ[:4] == 'uint': - return value - elif typ == 'bool': - assert value in (True, False) - return value - elif isinstance(typ, list): - return [jsonize(element, typ[0], include_hash_tree_roots) for element in value] - elif isinstance(typ, str) and typ[:4] == 'byte': - return '0x' + value.hex() - elif hasattr(typ, 'fields'): - ret = {} - for field, subtype in typ.fields.items(): - ret[field] = jsonize(getattr(value, field), subtype, include_hash_tree_roots) - if include_hash_tree_roots: - ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() - if include_hash_tree_roots: - ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() - return ret - else: - print(value, typ) - raise Exception("Type not recognized") - - -def dejsonize(json, typ): - if isinstance(typ, str) and typ[:4] == 'uint': - return json - elif typ == 'bool': - assert json in (True, False) - return json - elif isinstance(typ, list): - return [dejsonize(element, typ[0]) for element in json] - elif isinstance(typ, str) and typ[:4] == 'byte': - return bytes.fromhex(json[2:]) - elif hasattr(typ, 'fields'): - temp = {} - for field, subtype in typ.fields.items(): - temp[field] = dejsonize(json[field], subtype) - if field + 
"_hash_tree_root" in json: - assert(json[field + "_hash_tree_root"][2:] == - hash_tree_root(temp[field], subtype).hex()) - ret = typ(**temp) - if "hash_tree_root" in json: - assert(json["hash_tree_root"][2:] == - hash_tree_root(ret, typ).hex()) - return ret - else: - print(json, typ) - raise Exception("Type not recognized") diff --git a/test_libs/pyspec/eth2spec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py index 59ff07d37..d869fc408 100644 --- a/test_libs/pyspec/eth2spec/phase0/state_transition.py +++ b/test_libs/pyspec/eth2spec/phase0/state_transition.py @@ -99,13 +99,17 @@ def process_epoch_transition(state: BeaconState) -> None: spec.finish_epoch_update(state) -def state_transition(state: BeaconState, - block: BeaconBlock, - verify_state_root: bool=False) -> BeaconState: - while state.slot < block.slot: +def state_transition_to(state: BeaconState, up_to: int) -> BeaconState: + while state.slot < up_to: spec.cache_state(state) if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: process_epoch_transition(state) spec.advance_slot(state) - if block.slot == state.slot: - process_block(state, block, verify_state_root) + + +def state_transition(state: BeaconState, + block: BeaconBlock, + verify_state_root: bool=False) -> BeaconState: + state_transition_to(state, block.slot) + process_block(state, block, verify_state_root) + From aa7ca8d0171139f19c86f711476e6c4b6cabe60c Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Wed, 10 Apr 2019 13:38:48 +0100 Subject: [PATCH 228/481] Add dependencies to Makefile --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 88f17dcf9..d082dc1b9 100644 --- a/Makefile +++ b/Makefile @@ -16,11 +16,11 @@ clean: # runs a limited set of tests against a minimal config # run pytest with `-m` option to full suite -test: +test: all pytest -m minimal_config tests/ -$(BUILD_DIR)/phase0: +$(BUILD_DIR)/phase0: $(SPEC_DIR)/core/0_beacon-chain.md $(SCRIPT_DIR)/phase0/*.py $(UTILS_DIR)/phase0/*.py mkdir -p $@ python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py mkdir -p $@/utils From e5fb9626e8a4e147d6ec9a00724e0b96877f5321 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 11 Apr 2019 00:20:59 +1000 Subject: [PATCH 229/481] make ssz test gen conform to general test format --- test_generators/ssz/main.py | 110 ++++++------------ test_generators/ssz/renderers.py | 9 -- test_generators/ssz/requirements.txt | 4 +- ..._test_generators.py => uint_test_cases.py} | 34 ------ 4 files changed, 37 insertions(+), 120 deletions(-) rename test_generators/ssz/{uint_test_generators.py => uint_test_cases.py} (74%) diff --git a/test_generators/ssz/main.py b/test_generators/ssz/main.py index d19ec12b4..f6454c3c0 100644 --- a/test_generators/ssz/main.py +++ b/test_generators/ssz/main.py @@ -1,84 +1,42 @@ -import argparse -import pathlib -import sys - -from ruamel.yaml import ( - YAML, +from uint_test_cases import ( + generate_random_uint_test_cases, + generate_uint_wrong_length_test_cases, + generate_uint_bounds_test_cases, + generate_uint_out_of_bounds_test_cases ) -from uint_test_generators import ( - generate_uint_bounds_test, - generate_uint_random_test, - generate_uint_wrong_length_test, -) +from gen_base import gen_runner, gen_suite, gen_typing -test_generators = [ - generate_uint_random_test, - generate_uint_wrong_length_test, - generate_uint_bounds_test, -] +def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("uint_random", 
"core", gen_suite.render_suite( + title="UInt Random", + summary="Random integers chosen uniformly over the allowed value range", + forks_timeline= "mainnet", + forks=["phase0"], + config="mainnet", + handler="core", + test_cases=generate_random_uint_test_cases())) +def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("uint_wrong_length", "core", gen_suite.render_suite( + title="UInt Wrong Length", + summary="Serialized integers that are too short or too long", + forks_timeline= "mainnet", + forks=["phase0"], + config="mainnet", + handler="core", + test_cases=generate_uint_wrong_length_test_cases())) -def make_filename_for_test(test): - title = test["title"] - filename = title.lower().replace(" ", "_") + ".yaml" - return pathlib.Path(filename) - - -def validate_output_dir(path_str): - path = pathlib.Path(path_str) - - if not path.exists(): - raise argparse.ArgumentTypeError("Output directory must exist") - - if not path.is_dir(): - raise argparse.ArgumentTypeError("Output path must lead to a directory") - - return path - - -parser = argparse.ArgumentParser( - prog="gen-ssz-tests", - description="Generate YAML test files for SSZ and tree hashing", -) -parser.add_argument( - "-o", - "--output-dir", - dest="output_dir", - required=True, - type=validate_output_dir, - help="directory into which the generated YAML files will be dumped" -) -parser.add_argument( - "-f", - "--force", - action="store_true", - default=False, - help="if set overwrite test files if they exist", -) +def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("uint_bounds", "core", gen_suite.render_suite( + title="UInt Bounds", + summary="Integers right at or beyond the bounds of the allowed value range", + forks_timeline= "mainnet", + forks=["phase0"], + config="mainnet", + handler="core", + test_cases=generate_uint_bounds_test_cases() + generate_uint_out_of_bounds_test_cases())) if __name__ == "__main__": - args = parser.parse_args() - output_dir = args.output_dir - if not args.force: - file_mode = "x" - else: - file_mode = "w" - - yaml = YAML(pure=True) - - print(f"generating {len(test_generators)} test files...") - for test_generator in test_generators: - test = test_generator() - - filename = make_filename_for_test(test) - path = output_dir / filename - - try: - with path.open(file_mode) as f: - yaml.dump(test, f) - except IOError as e: - sys.exit(f'Error when dumping test "{test["title"]}" ({e})') - - print("done.") + gen_runner.run_generator("ssz", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite]) \ No newline at end of file diff --git a/test_generators/ssz/renderers.py b/test_generators/ssz/renderers.py index e551ab14c..ee8a92838 100644 --- a/test_generators/ssz/renderers.py +++ b/test_generators/ssz/renderers.py @@ -91,12 +91,3 @@ def render_test_case(*, sedes, valid, value=None, serial=None, description=None, if description is not None: yield description yield "tags", tags - - -@to_dict -def render_test(*, title, summary, fork, test_cases): - yield "title", title, - if summary is not None: - yield "summary", summary - yield "fork", fork - yield "test_cases", test_cases diff --git a/test_generators/ssz/requirements.txt b/test_generators/ssz/requirements.txt index 88193a01d..94afc9d91 100644 --- a/test_generators/ssz/requirements.txt +++ b/test_generators/ssz/requirements.txt @@ -1,2 +1,4 @@ -ruamel.yaml==0.15.87 +eth-utils==1.4.1 +../../test_libs/gen_helpers +../../test_libs/config_helpers ssz==0.1.0a2 diff --git 
a/test_generators/ssz/uint_test_generators.py b/test_generators/ssz/uint_test_cases.py similarity index 74% rename from test_generators/ssz/uint_test_generators.py rename to test_generators/ssz/uint_test_cases.py index c8c841fe7..d123564ca 100644 --- a/test_generators/ssz/uint_test_generators.py +++ b/test_generators/ssz/uint_test_cases.py @@ -9,7 +9,6 @@ from ssz.sedes import ( UInt, ) from renderers import ( - render_test, render_test_case, ) @@ -25,39 +24,6 @@ def get_random_bytes(length): return bytes(random.randint(0, 255) for _ in range(length)) -def generate_uint_bounds_test(): - test_cases = generate_uint_bounds_test_cases() + generate_uint_out_of_bounds_test_cases() - - return render_test( - title="UInt Bounds", - summary="Integers right at or beyond the bounds of the allowed value range", - fork="phase0-0.2.0", - test_cases=test_cases, - ) - - -def generate_uint_random_test(): - test_cases = generate_random_uint_test_cases() - - return render_test( - title="UInt Random", - summary="Random integers chosen uniformly over the allowed value range", - fork="phase0-0.2.0", - test_cases=test_cases, - ) - - -def generate_uint_wrong_length_test(): - test_cases = generate_uint_wrong_length_test_cases() - - return render_test( - title="UInt Wrong Length", - summary="Serialized integers that are too short or too long", - fork="phase0-0.2.0", - test_cases=test_cases, - ) - - @to_tuple def generate_random_uint_test_cases(): for bit_size in BIT_SIZES: From 41374957bb5e0211876f9603c89ec0497fcf2cb9 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 11 Apr 2019 01:52:51 +1000 Subject: [PATCH 230/481] update shuffling --- test_generators/shuffling/constants.py | 6 - test_generators/shuffling/core_helpers.py | 95 -------- test_generators/shuffling/main.py | 251 +++++++++------------ test_generators/shuffling/requirements.txt | 6 +- test_generators/shuffling/utils.py | 6 - test_generators/shuffling/yaml_objects.py | 25 -- 6 files changed, 112 insertions(+), 277 deletions(-) delete mode 100644 test_generators/shuffling/constants.py delete mode 100644 test_generators/shuffling/core_helpers.py delete mode 100644 test_generators/shuffling/utils.py delete mode 100644 test_generators/shuffling/yaml_objects.py diff --git a/test_generators/shuffling/constants.py b/test_generators/shuffling/constants.py deleted file mode 100644 index 92862f898..000000000 --- a/test_generators/shuffling/constants.py +++ /dev/null @@ -1,6 +0,0 @@ -SLOTS_PER_EPOCH = 2**6 # 64 slots, 6.4 minutes -FAR_FUTURE_EPOCH = 2**64 - 1 # uint64 max -SHARD_COUNT = 2**10 # 1024 -TARGET_COMMITTEE_SIZE = 2**7 # 128 validators -ACTIVATION_EXIT_DELAY = 2**2 # 4 epochs -SHUFFLE_ROUND_COUNT = 90 diff --git a/test_generators/shuffling/core_helpers.py b/test_generators/shuffling/core_helpers.py deleted file mode 100644 index c424b771e..000000000 --- a/test_generators/shuffling/core_helpers.py +++ /dev/null @@ -1,95 +0,0 @@ -from typing import Any, List, NewType - -from constants import SLOTS_PER_EPOCH, SHARD_COUNT, TARGET_COMMITTEE_SIZE, SHUFFLE_ROUND_COUNT -from utils import hash -from yaml_objects import Validator - -Epoch = NewType("Epoch", int) -ValidatorIndex = NewType("ValidatorIndex", int) -Bytes32 = NewType("Bytes32", bytes) - - -def int_to_bytes1(x): - return x.to_bytes(1, 'little') - - -def int_to_bytes4(x): - return x.to_bytes(4, 'little') - - -def bytes_to_int(data: bytes) -> int: - return int.from_bytes(data, 'little') - - -def is_active_validator(validator: Validator, epoch: Epoch) -> bool: - """ - Check if ``validator`` is active. 
- """ - return validator.activation_epoch <= epoch < validator.exit_epoch - - -def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]: - """ - Get indices of active validators from ``validators``. - """ - return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] - - -def split(values: List[Any], split_count: int) -> List[List[Any]]: - """ - Splits ``values`` into ``split_count`` pieces. - """ - list_length = len(values) - return [ - values[(list_length * i // split_count): (list_length * (i + 1) // split_count)] - for i in range(split_count) - ] - - -def get_epoch_committee_count(active_validator_count: int) -> int: - """ - Return the number of committees in one epoch. - """ - return max( - 1, - min( - SHARD_COUNT // SLOTS_PER_EPOCH, - active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, - ) - ) * SLOTS_PER_EPOCH - - -def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: - """ - Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. - - Utilizes 'swap or not' shuffling found in - https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf - See the 'generalized domain' algorithm on page 3. - """ - for round in range(SHUFFLE_ROUND_COUNT): - pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size - flip = (pivot - index) % list_size - position = max(index, flip) - source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position // 256)) - byte = source[(position % 256) // 8] - bit = (byte >> (position % 8)) % 2 - index = flip if bit else index - - return index - - -def get_shuffling(seed: Bytes32, - validators: List[Validator], - epoch: Epoch) -> List[List[ValidatorIndex]]: - """ - Shuffle active validators and split into crosslink committees. - Return a list of committees (each a list of validator indices). 
- """ - # Shuffle active validator indices - active_validator_indices = get_active_validator_indices(validators, epoch) - length = len(active_validator_indices) - shuffled_indices = [active_validator_indices[get_permuted_index(i, length, seed)] for i in range(length)] - - # Split the shuffled active validator indices - return split(shuffled_indices, get_epoch_committee_count(length)) diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py index 03352944a..e2edff7c7 100644 --- a/test_generators/shuffling/main.py +++ b/test_generators/shuffling/main.py @@ -1,160 +1,127 @@ import random -import sys -import os -import yaml - -from constants import ACTIVATION_EXIT_DELAY, FAR_FUTURE_EPOCH -from core_helpers import get_shuffling -from yaml_objects import Validator +from eth2spec.phase0.spec import * +from eth_utils import ( + to_dict, to_tuple +) +from gen_base import gen_runner, gen_suite, gen_typing +from preset_loader import loader -def noop(self, *args, **kw): - # Prevent !!str or !!binary tags - pass +@to_dict +def active_exited_validator_case(idx_max: int): + validators = [] + # Standard deviation, around 8% validators will activate or exit within + # ENTRY_EXIT_DELAY inclusive from EPOCH thus creating an edge case for validator + # shuffling + RAND_EPOCH_STD = 35 -yaml.emitter.Emitter.process_tag = noop + # TODO: fix epoch numbers + + slot = 1000 * SLOTS_PER_EPOCH + # The epoch, also a mean for the normal distribution + epoch = slot_to_epoch(slot) + MAX_EXIT_EPOCH = epoch + 5000 # Maximum exit_epoch for easier reading + for idx in range(idx_max): + v = Validator( + pubkey=bytes(random.randint(0, 255) for _ in range(48)), + withdrawal_credentials=bytes(random.randint(0, 255) for _ in range(32)), + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + initiated_exit=False, + slashed=False, + high_balance=0 + ) + # 4/5 of all validators are active + if random.random() < 0.8: + # Choose a normally distributed epoch number + rand_epoch = round(random.gauss(epoch, RAND_EPOCH_STD)) -EPOCH = 1000 # The epoch, also a mean for the normal distribution + # for 1/2 of *active* validators rand_epoch is the activation epoch + if random.random() < 0.5: + v.activation_epoch = rand_epoch -# Standard deviation, around 8% validators will activate or exit within -# ENTRY_EXIT_DELAY inclusive from EPOCH thus creating an edge case for validator -# shuffling -RAND_EPOCH_STD = 35 -MAX_EXIT_EPOCH = 5000 # Maximum exit_epoch for easier reading - - -def active_exited_validators_generator(): - """ - Random cases with variety of validator's activity status - """ - # Order not preserved - https://github.com/yaml/pyyaml/issues/110 - metadata = { - 'title': 'Shuffling Algorithm Tests 1', - 'summary': 'Test vectors for validator shuffling with different validator\'s activity status.' 
- ' Note: only relevant validator fields are defined.', - 'test_suite': 'shuffle', - 'fork': 'phase0-0.5.0', - } - - # Config - random.seed(int("0xEF00BEAC", 16)) - num_cases = 10 - - test_cases = [] - - for case in range(num_cases): - seedhash = bytes(random.randint(0, 255) for byte in range(32)) - idx_max = random.randint(128, 512) - - validators = [] - for idx in range(idx_max): - v = Validator(original_index=idx) - # 4/5 of all validators are active - if random.random() < 0.8: - # Choose a normally distributed epoch number - rand_epoch = round(random.gauss(EPOCH, RAND_EPOCH_STD)) - - # for 1/2 of *active* validators rand_epoch is the activation epoch + # 1/4 of active validators will exit in forseeable future if random.random() < 0.5: - v.activation_epoch = rand_epoch - - # 1/4 of active validators will exit in forseeable future - if random.random() < 0.5: - v.exit_epoch = random.randint( - rand_epoch + ACTIVATION_EXIT_DELAY + 1, MAX_EXIT_EPOCH) - # 1/4 of active validators in theory remain in the set indefinitely - else: - v.exit_epoch = FAR_FUTURE_EPOCH - # for the other active 1/2 rand_epoch is the exit epoch + v.exit_epoch = random.randint( + rand_epoch + ACTIVATION_EXIT_DELAY + 1, MAX_EXIT_EPOCH) + # 1/4 of active validators in theory remain in the set indefinitely else: - v.activation_epoch = random.randint( - 0, rand_epoch - ACTIVATION_EXIT_DELAY) - v.exit_epoch = rand_epoch - - # The remaining 1/5 of all validators is not activated + v.exit_epoch = FAR_FUTURE_EPOCH + # for the other active 1/2 rand_epoch is the exit epoch else: - v.activation_epoch = FAR_FUTURE_EPOCH - v.exit_epoch = FAR_FUTURE_EPOCH + v.activation_epoch = random.randint( + 0, rand_epoch - ACTIVATION_EXIT_DELAY) + v.exit_epoch = rand_epoch - validators.append(v) - - input_ = { - 'validators': validators, - 'epoch': EPOCH - } - output = get_shuffling( - seedhash, validators, input_['epoch']) - - test_cases.append({ - 'seed': '0x' + seedhash.hex(), 'input': input_, 'output': output - }) - - return { - 'metadata': metadata, - 'filename': 'test_vector_shuffling.yml', - 'test_cases': test_cases - } - - -def validators_set_size_variety_generator(): - """ - Different validator set size cases, inspired by removed manual `permutated_index` tests - https://github.com/ethereum/eth2.0-test-generators/tree/bcd9ab2933d9f696901d1dfda0828061e9d3093f/permutated_index - """ - # Order not preserved - https://github.com/yaml/pyyaml/issues/110 - metadata = { - 'title': 'Shuffling Algorithm Tests 2', - 'summary': 'Test vectors for validator shuffling with different validator\'s set size.' 
- ' Note: only relevant validator fields are defined.', - 'test_suite': 'shuffle', - 'fork': 'tchaikovsky', - 'version': 1.0 - } - - # Config - random.seed(int("0xEF00BEAC", 16)) - - test_cases = [] - - seedhash = bytes(random.randint(0, 255) for byte in range(32)) - idx_max = 4096 - set_sizes = [1, 2, 3, 1024, idx_max] - - for size in set_sizes: - validators = [] - for idx in range(size): - v = Validator(original_index=idx) - v.activation_epoch = EPOCH + # The remaining 1/5 of all validators is not activated + else: + v.activation_epoch = FAR_FUTURE_EPOCH v.exit_epoch = FAR_FUTURE_EPOCH - validators.append(v) - input_ = { - 'validators': validators, - 'epoch': EPOCH - } - output = get_shuffling( - seedhash, validators, input_['epoch']) - test_cases.append({ - 'seed': '0x' + seedhash.hex(), 'input': input_, 'output': output - }) + validators.append(v) - return { - 'metadata': metadata, - 'filename': 'shuffling_set_size.yml', - 'test_cases': test_cases - } + query_slot = slot + random.randint(-1, 1) + state = get_genesis_beacon_state([], 0, None) + state.validator_registry = validators + state.latest_randao_mixes = [b'\xde\xad\xbe\xef' * 8 for _ in range(LATEST_RANDAO_MIXES_LENGTH)] + state.slot = slot + state.latest_start_shard = random.randint(0, SHARD_COUNT - 1) + randao_mix = bytes(random.randint(0, 255) for _ in range(32)) + state.latest_randao_mixes[slot_to_epoch(query_slot) % LATEST_RANDAO_MIXES_LENGTH] = randao_mix + + committees = get_crosslink_committees_at_slot(state, query_slot) + yield 'validator_registry', [ + { + 'activation_epoch': v.activation_epoch, + 'exit_epoch': v.exit_epoch + } for v in state.validator_registry + ] + yield 'randao_mix', '0x'+randao_mix.hex() + yield 'state_slot', state.slot + yield 'query_slot', query_slot + yield 'latest_start_shard', state.latest_start_shard + yield 'crosslink_committees', committees -if __name__ == '__main__': - output_dir = sys.argv[2] - for generator in [active_exited_validators_generator, validators_set_size_variety_generator]: - result = generator() - filename = os.path.join(output_dir, result['filename']) - with open(filename, 'w') as outfile: - # Dump at top level - yaml.dump(result['metadata'], outfile, default_flow_style=False) - # default_flow_style will unravel "ValidatorRecord" and "committee" line, exploding file size - yaml.dump({'test_cases': result['test_cases']}, outfile) +@to_tuple +def active_exited_validator_cases(): + for i in range(3): + yield active_exited_validator_case(random.randint(100, min(200, SHARD_COUNT * 2))) + + +def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + presets = loader.load_presets(configs_path, 'minimal') + apply_constants_preset(presets) + + return ("shuffling_minimal", "core", gen_suite.render_suite( + title="Shuffling Algorithm Tests with minimal config", + summary="Test vectors for validator shuffling with different validator registry activity status and set size." + " Note: only relevant fields are defined.", + forks_timeline="testing", + forks=["phase0"], + config="minimal", + handler="core", + test_cases=active_exited_validator_cases())) + + +def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + presets = loader.load_presets(configs_path, 'mainnet') + apply_constants_preset(presets) + + return ("shuffling_full", "core", gen_suite.render_suite( + title="Shuffling Algorithm Tests with mainnet config", + summary="Test vectors for validator shuffling with different validator registry activity status and set size." 
+ " Note: only relevant fields are defined.", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="core", + test_cases=active_exited_validator_cases())) + + +if __name__ == "__main__": + gen_runner.run_generator("shuffling", [mini_shuffling_suite, full_shuffling_suite]) diff --git a/test_generators/shuffling/requirements.txt b/test_generators/shuffling/requirements.txt index dde2fb67d..8f9bede8f 100644 --- a/test_generators/shuffling/requirements.txt +++ b/test_generators/shuffling/requirements.txt @@ -1,4 +1,4 @@ -eth-hash[pycryptodome]==0.2.0 -eth-typing==2.0.0 eth-utils==1.4.1 -PyYAML==4.2b1 +../../test_libs/gen_helpers +../../test_libs/config_helpers +../../test_libs/pyspec \ No newline at end of file diff --git a/test_generators/shuffling/utils.py b/test_generators/shuffling/utils.py deleted file mode 100644 index bcd2c6a3c..000000000 --- a/test_generators/shuffling/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -from eth_typing import Hash32 -from eth_utils import keccak - - -def hash(x: bytes) -> Hash32: - return keccak(x) diff --git a/test_generators/shuffling/yaml_objects.py b/test_generators/shuffling/yaml_objects.py deleted file mode 100644 index 18e45220e..000000000 --- a/test_generators/shuffling/yaml_objects.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import Any - -import yaml - - -class Validator(yaml.YAMLObject): - """ - A validator stub containing only the fields relevant for get_shuffling() - """ - fields = { - 'activation_epoch': 'uint64', - 'exit_epoch': 'uint64', - # Extra index field to ease testing/debugging - 'original_index': 'uint64', - } - - def __init__(self, **kwargs): - for k in self.fields.keys(): - setattr(self, k, kwargs.get(k)) - - def __setattr__(self, name: str, value: Any) -> None: - super().__setattr__(name, value) - - def __getattribute__(self, name: str) -> Any: - return super().__getattribute__(name) From 9cd1b4206b3266686bd4b5d9c58fe291c0e7128c Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Wed, 10 Apr 2019 13:28:24 -0500 Subject: [PATCH 231/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index b78eff93e..bc621da1a 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -115,10 +115,10 @@ Let `value` be a self-signed container object. 
The convention is that the signat | Language | Project | Maintainer | Implementation | |-|-|-|-| | Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) | -| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz) | +| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) | | Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) | | Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) | -| Javascript | Lodestart | Chain Safe Systems | [https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js](https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js) | +| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) | | Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) | | Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz](https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz) | | Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) | From 2bda58fbdcd54355d035096d863e92279fe79664 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 11 Apr 2019 17:13:45 +1000 Subject: [PATCH 232/481] Clean up light client spec --- specs/light_client/merkle_proofs.md | 36 ++++++++++++++--------------- specs/light_client/sync_protocol.md | 21 +++++++++-------- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 285445ca8..a3c8fa154 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -47,33 +47,33 @@ y_data_root len(y) We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. 
```python -def path_to_encoded_form(obj: Any, path: List[str or int]) -> List[int]: +def path_to_encoded_form(obj: Any, path: List[Union[str, int]]) -> List[int]: if len(path) == 0: return [] - if isinstance(path[0], "__len__"): + elif isinstance(path[0], "__len__"): assert len(path) == 1 return [LENGTH_FLAG] elif isinstance(path[0], str) and hasattr(obj, "fields"): return [list(obj.fields.keys()).index(path[0])] + path_to_encoded_form(getattr(obj, path[0]), path[1:]) - elif isinstance(obj, (StaticList, DynamicList)): + elif isinstance(obj, (Vector, List)): return [path[0]] + path_to_encoded_form(obj[path[0]], path[1:]) else: raise Exception("Unknown type / path") ``` -We can now define a function `get_generalized_indices(object: Any, path: List[int], root=1: int) -> int` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. +We can now define a function `get_generalized_indices(object: Any, path: List[int], root: int=1) -> List[int]` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. 
```python -def get_generalized_indices(obj: Any, path: List[int], root=1) -> List[int]: +def get_generalized_indices(obj: Any, path: List[int], root: int=1) -> List[int]: if len(path) == 0: return [root] - elif isinstance(obj, StaticList): + elif isinstance(obj, Vector): items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 new_root = root * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk return get_generalized_indices(obj[path[0]], path[1:], new_root) - elif isinstance(obj, DynamicList) and path[0] == LENGTH_FLAG: + elif isinstance(obj, List) and path[0] == LENGTH_FLAG: return [root * 2 + 1] - elif isinstance(obj, DynamicList) and isinstance(path[0], int): + elif isinstance(obj, List) and isinstance(path[0], int): assert path[0] < len(obj) items_per_chunk = (32 // len(serialize(x))) if isinstance(x, int) else 1 new_root = root * 2 * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk @@ -137,19 +137,19 @@ Generating a proof is simply a matter of taking the node of the SSZ hash tree wi Here is the verification function: ```python -def verify_multi_proof(root, indices, leaves, proof): +def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[bytes]): tree = {} for index, leaf in zip(indices, leaves): tree[index] = leaf for index, proofitem in zip(get_proof_indices(indices), proof): tree[index] = proofitem - indexqueue = sorted(tree.keys())[:-1] + index_queue = sorted(tree.keys())[:-1] i = 0 - while i < len(indexqueue): - index = indexqueue[i] - if index >= 2 and index^1 in tree: - tree[index//2] = hash(tree[index - index%2] + tree[index - index%2 + 1]) - indexqueue.append(index//2) + while i < len(index_queue): + index = index_queue[i] + if index >= 2 and index ^ 1 in tree: + tree[index // 2] = hash(tree[index - index % 2] + tree[index - index % 2 + 1]) + index_queue.append(index // 2) i += 1 return (indices == []) or (1 in tree and tree[1] == root) ``` @@ -158,7 +158,7 @@ def verify_multi_proof(root, indices, leaves, proof): We define: -#### `MerklePartial` +#### `SSZMerklePartial` ```python @@ -172,6 +172,6 @@ We define: #### Proofs for execution -We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `MerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`. +We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `SSZMerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`. -Ideally, any function which accepts an SSZ object should also be able to accept a `MerklePartial` object as a substitute. +Ideally, any function which accepts an SSZ object should also be able to accept a `SSZMerklePartial` object as a substitute. 
diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 94ab8a2e4..7db02050e 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -5,16 +5,19 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers ## Table of Contents + - [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing) - [Table of Contents](#table-of-contents) - - [Light client state](#light-client-state) - - [Updating the shuffled committee](#updating-the-shuffled-committee) - - [Computing the current committee](#computing-the-current-committee) - - [Verifying blocks](#verifying-blocks) + - [Preliminaries](#preliminaries) + - [Light client state](#light-client-state) + - [Updating the shuffled committee](#updating-the-shuffled-committee) + - [Computing the current committee](#computing-the-current-committee) + - [Verifying blocks](#verifying-blocks) + -### Preliminaries +## Preliminaries We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (eg. `BeaconBlockHeader` is a summary of `BeaconBlock`). @@ -85,7 +88,7 @@ later_period_data = get_period_data(new_committee_proof, finalized_header, shard The maximum size of a proof is `128 * ((22-7) * 32 + 110) = 75520` bytes for validator records and `(22-7) * 32 + 128 * 8 = 1504` for the active index proof (much smaller because the relevant active indices are all beside each other in the Merkle tree). This needs to be done once per `PERSISTENT_COMMITTEE_PERIOD` epochs (2048 epochs / 9 days), or ~38 bytes per epoch. -### Computing the current committee +## Computing the current committee Here is a helper to compute the committee at a slot given the maximal earlier and later committees: @@ -134,16 +137,16 @@ def compute_committee(header: BeaconBlockHeader, Note that this method makes use of the fact that the committee for any given shard always starts and ends at the same validator index independently of the committee count (this is because the validator set is split into `SHARD_COUNT * committee_count` slices but the first slice of a shard is a multiple `committee_count * i`, so the start of the slice is `n * committee_count * i // (SHARD_COUNT * committee_count) = n * i // SHARD_COUNT`, using the slightly nontrivial algebraic identity `(x * a) // ab == x // b`). 
-### Verifying blocks +## Verifying blocks If a client wants to update its `finalized_header` it asks the network for a `BlockValidityProof`, which is simply: ```python { - 'header': BlockHeader, + 'header': BeaconBlockHeader, 'shard_aggregate_signature': 'bytes96', 'shard_bitfield': 'bytes', - 'shard_parent_block': ShardBlock + 'shard_parent_block': ShardBlock, } ``` From d7b7640221acb9981dba0818e945ded0ccdd77bf Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 11 Apr 2019 18:33:46 +1000 Subject: [PATCH 233/481] overhaul shuffling tests, focus on swap-or-not shuffle --- configs/constant_presets/minimal.yaml | 2 +- test_generators/shuffling/README.md | 26 +++--- test_generators/shuffling/main.py | 109 ++++---------------------- 3 files changed, 31 insertions(+), 106 deletions(-) diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index f73531f38..8997bc5ed 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -16,7 +16,7 @@ MAX_ATTESTATION_PARTICIPANTS: 4096 # 2**2 ` (= 4) MAX_EXIT_DEQUEUES_PER_EPOCH: 4 # See issue 563 -SHUFFLE_ROUND_COUNT: 90 +SHUFFLE_ROUND_COUNT: 10 # Deposit contract diff --git a/test_generators/shuffling/README.md b/test_generators/shuffling/README.md index 047a1b872..5cf9d50e0 100644 --- a/test_generators/shuffling/README.md +++ b/test_generators/shuffling/README.md @@ -1,16 +1,16 @@ -# Shuffling Test Generator +# Shuffling Tests -``` -2018 Status Research & Development GmbH -Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). +Tests for the swap-or-not shuffling in ETH 2.0. -This work uses public domain work under CC0 from the Ethereum Foundation -https://github.com/ethereum/eth2.0-specs -``` +For implementers, possible test runners implementing testing can include: +1) just test permute-index, run it for each index `i` in `range(count)`, and check against expected `output[i]` (default spec implementation) +2) test un-permute-index (the reverse lookup. Implemented by running the shuffling rounds in reverse: from `round_count-1` to `0`) +3) test the optimized complete shuffle, where all indices are shuffled at once, test output in one go. +4) test complete shuffle in reverse (reverse rounds, same as 2) - -This file implements a test vectors generator for the shuffling algorithm described in the Ethereum -[specs](https://github.com/ethereum/eth2.0-specs/blob/2983e68f0305551083fac7fcf9330c1fc9da3411/specs/core/0_beacon-chain.md#get_new_shuffling) - -Utilizes 'swap or not' shuffling found in [An Enciphering Scheme Based on a Card Shuffle](https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf). -See the `Generalized domain` algorithm on page 3. +Tips for initial shuffling write: +- run with `round_count = 1` first, do the same with pyspec. 
+- start with permute index +- optimized shuffling implementations: + - vitalik, Python: https://github.com/ethereum/eth2.0-specs/pull/576#issue-250741806 + - protolambda, Go: https://github.com/protolambda/eth2-shuffle diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py index e2edff7c7..e8b2054a2 100644 --- a/test_generators/shuffling/main.py +++ b/test_generators/shuffling/main.py @@ -1,6 +1,4 @@ -import random - -from eth2spec.phase0.spec import * +from eth2spec.phase0 import spec from eth_utils import ( to_dict, to_tuple ) @@ -9,118 +7,45 @@ from preset_loader import loader @to_dict -def active_exited_validator_case(idx_max: int): - validators = [] - - # Standard deviation, around 8% validators will activate or exit within - # ENTRY_EXIT_DELAY inclusive from EPOCH thus creating an edge case for validator - # shuffling - RAND_EPOCH_STD = 35 - - # TODO: fix epoch numbers - - slot = 1000 * SLOTS_PER_EPOCH - # The epoch, also a mean for the normal distribution - epoch = slot_to_epoch(slot) - MAX_EXIT_EPOCH = epoch + 5000 # Maximum exit_epoch for easier reading - - for idx in range(idx_max): - v = Validator( - pubkey=bytes(random.randint(0, 255) for _ in range(48)), - withdrawal_credentials=bytes(random.randint(0, 255) for _ in range(32)), - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - initiated_exit=False, - slashed=False, - high_balance=0 - ) - # 4/5 of all validators are active - if random.random() < 0.8: - # Choose a normally distributed epoch number - rand_epoch = round(random.gauss(epoch, RAND_EPOCH_STD)) - - # for 1/2 of *active* validators rand_epoch is the activation epoch - if random.random() < 0.5: - v.activation_epoch = rand_epoch - - # 1/4 of active validators will exit in forseeable future - if random.random() < 0.5: - v.exit_epoch = random.randint( - rand_epoch + ACTIVATION_EXIT_DELAY + 1, MAX_EXIT_EPOCH) - # 1/4 of active validators in theory remain in the set indefinitely - else: - v.exit_epoch = FAR_FUTURE_EPOCH - # for the other active 1/2 rand_epoch is the exit epoch - else: - v.activation_epoch = random.randint( - 0, rand_epoch - ACTIVATION_EXIT_DELAY) - v.exit_epoch = rand_epoch - - # The remaining 1/5 of all validators is not activated - else: - v.activation_epoch = FAR_FUTURE_EPOCH - v.exit_epoch = FAR_FUTURE_EPOCH - - validators.append(v) - - query_slot = slot + random.randint(-1, 1) - state = get_genesis_beacon_state([], 0, None) - state.validator_registry = validators - state.latest_randao_mixes = [b'\xde\xad\xbe\xef' * 8 for _ in range(LATEST_RANDAO_MIXES_LENGTH)] - state.slot = slot - state.latest_start_shard = random.randint(0, SHARD_COUNT - 1) - randao_mix = bytes(random.randint(0, 255) for _ in range(32)) - state.latest_randao_mixes[slot_to_epoch(query_slot) % LATEST_RANDAO_MIXES_LENGTH] = randao_mix - - committees = get_crosslink_committees_at_slot(state, query_slot) - yield 'validator_registry', [ - { - 'activation_epoch': v.activation_epoch, - 'exit_epoch': v.exit_epoch - } for v in state.validator_registry - ] - yield 'randao_mix', '0x'+randao_mix.hex() - yield 'state_slot', state.slot - yield 'query_slot', query_slot - yield 'latest_start_shard', state.latest_start_shard - yield 'crosslink_committees', committees +def shuffling_case(seed: spec.Bytes32, count: int): + yield 'seed', '0x' + seed.hex() + yield 'count', count + yield 'shuffled', [spec.get_permuted_index(i, count, seed) for i in range(count)] @to_tuple -def active_exited_validator_cases(): - for i in 
range(3): - yield active_exited_validator_case(random.randint(100, min(200, SHARD_COUNT * 2))) +def shuffling_test_cases(): + for seed in [spec.hash(spec.int_to_bytes4(seed_init_value)) for seed_init_value in range(30)]: + for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000]: + yield shuffling_case(seed, count) def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, 'minimal') - apply_constants_preset(presets) + spec.apply_constants_preset(presets) return ("shuffling_minimal", "core", gen_suite.render_suite( - title="Shuffling Algorithm Tests with minimal config", - summary="Test vectors for validator shuffling with different validator registry activity status and set size." - " Note: only relevant fields are defined.", + title="Swap-or-Not Shuffling tests with minimal config", + summary="Swap or not shuffling, with minimally configured testing round-count", forks_timeline="testing", forks=["phase0"], config="minimal", handler="core", - test_cases=active_exited_validator_cases())) + test_cases=shuffling_test_cases())) def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: presets = loader.load_presets(configs_path, 'mainnet') - apply_constants_preset(presets) + spec.apply_constants_preset(presets) return ("shuffling_full", "core", gen_suite.render_suite( - title="Shuffling Algorithm Tests with mainnet config", - summary="Test vectors for validator shuffling with different validator registry activity status and set size." - " Note: only relevant fields are defined.", + title="Swap-or-Not Shuffling tests with mainnet config", + summary="Swap or not shuffling, with normal configured (secure) mainnet round-count", forks_timeline="mainnet", forks=["phase0"], config="mainnet", handler="core", - test_cases=active_exited_validator_cases())) + test_cases=shuffling_test_cases())) if __name__ == "__main__": From 9c662ec4c36238024d332e23506833639c2842cb Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 11 Apr 2019 18:48:05 +1000 Subject: [PATCH 234/481] fix shard delta for latest shard change --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ad83b0f5f..590f42af3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2083,7 +2083,7 @@ def update_registry(state: BeaconState) -> None: update_validator_registry(state) state.latest_start_shard = ( state.latest_start_shard + - get_epoch_committee_count(state, get_current_epoch(state)) + get_shard_delta(state, get_current_epoch(state)) ) % SHARD_COUNT ``` From 9a0fd0afb927cba109bf8db4ec69df317b1dfc59 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 11 Apr 2019 19:15:23 +1000 Subject: [PATCH 235/481] port BLS tests to new suite format --- test_generators/bls/main.py | 209 +++++++++++++++++---------- test_generators/bls/requirements.txt | 3 +- 2 files changed, 131 insertions(+), 81 deletions(-) diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py index 4c19c6249..0ffa0f66b 100644 --- a/test_generators/bls/main.py +++ b/test_generators/bls/main.py @@ -2,16 +2,14 @@ BLS test vectors generator """ -# Standard library -import sys from typing import Tuple -# Third-party -import yaml -from py_ecc import bls +from eth_utils import ( + to_tuple, int_to_big_endian +) +from gen_base import gen_runner, gen_suite, gen_typing -# Ethereum -from eth_utils import int_to_big_endian, big_endian_to_int +from py_ecc import bls def 
int_to_hex(n: int) -> str: @@ -34,9 +32,9 @@ DOMAINS = [ ] MESSAGES = [ - b'\x00' * 32, - b'\x56' * 32, - b'\xab' * 32, + bytes(b'\x00' * 32), + bytes(b'\x56' * 32), + bytes(b'\xab' * 32), ] PRIVKEYS = [ @@ -80,109 +78,160 @@ def hash_message_compressed(msg: bytes, domain: int) -> Tuple[str, str]: return [int_to_hex(z1), int_to_hex(z2)] -if __name__ == '__main__': - # Order not preserved - https://github.com/yaml/pyyaml/issues/110 - metadata = { - 'title': 'BLS signature and aggregation tests', - 'summary': 'Test vectors for BLS signature', - 'test_suite': 'bls', - 'fork': 'phase0-0.5.0', - } - - case01_message_hash_G2_uncompressed = [] +@to_tuple +def case01_message_hash_G2_uncompressed(): for msg in MESSAGES: for domain in DOMAINS: - case01_message_hash_G2_uncompressed.append({ - 'input': {'message': '0x' + msg.hex(), 'domain': int_to_hex(domain)}, + yield { + 'input': { + 'message': '0x' + msg.hex(), + 'domain': int_to_hex(domain) + }, 'output': hash_message(msg, domain) - }) + } - case02_message_hash_G2_compressed = [] +@to_tuple +def case02_message_hash_G2_compressed(): for msg in MESSAGES: for domain in DOMAINS: - case02_message_hash_G2_compressed.append({ - 'input': {'message': '0x' + msg.hex(), 'domain': int_to_hex(domain)}, + yield { + 'input': { + 'message': '0x' + msg.hex(), + 'domain': int_to_hex(domain) + }, 'output': hash_message_compressed(msg, domain) - }) + } - case03_private_to_public_key = [] - #  Used in later cases +@to_tuple +def case03_private_to_public_key(): pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS] - #  Used in public key aggregation pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys] - case03_private_to_public_key = [ - { + for privkey, pubkey_serial in zip(PRIVKEYS, pubkeys_serial): + yield { 'input': int_to_hex(privkey), 'output': pubkey_serial, } - for privkey, pubkey_serial in zip(PRIVKEYS, pubkeys_serial) - ] - case04_sign_messages = [] - sigs = [] # used in verify +@to_tuple +def case04_sign_messages(): for privkey in PRIVKEYS: for message in MESSAGES: for domain in DOMAINS: sig = bls.sign(message, privkey, domain) - case04_sign_messages.append({ + yield { 'input': { 'privkey': int_to_hex(privkey), 'message': '0x' + message.hex(), 'domain': int_to_hex(domain) }, 'output': '0x' + sig.hex() - }) - sigs.append(sig) + } - # TODO: case05_verify_messages: Verify messages signed in case04 - # It takes too long, empty for now +# TODO: case05_verify_messages: Verify messages signed in case04 +# It takes too long, empty for now - case06_aggregate_sigs = [] + +@to_tuple +def case06_aggregate_sigs(): for domain in DOMAINS: for message in MESSAGES: - sigs = [] - for privkey in PRIVKEYS: - sig = bls.sign(message, privkey, domain) - sigs.append(sig) - case06_aggregate_sigs.append({ + sigs = [bls.sign(message, privkey, domain) for privkey in PRIVKEYS] + yield { 'input': ['0x' + sig.hex() for sig in sigs], 'output': '0x' + bls.aggregate_signatures(sigs).hex(), - }) + } - case07_aggregate_pubkeys = [ - { - 'input': pubkeys_serial, - 'output': '0x' + bls.aggregate_pubkeys(pubkeys).hex(), - } - ] +@to_tuple +def case07_aggregate_pubkeys(): + pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS] + pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys] + yield { + 'input': pubkeys_serial, + 'output': '0x' + bls.aggregate_pubkeys(pubkeys).hex(), + } - # TODO - # Aggregate verify - # TODO - # Proof-of-possession +# TODO +# Aggregate verify - with open(sys.argv[2] + "test_bls.yml", 'w') as outfile: - # Dump at top level - yaml.dump(metadata, 
outfile, default_flow_style=False) - # default_flow_style will unravel "ValidatorRecord" and "committee" line, - # exploding file size - yaml.dump( - {'case01_message_hash_G2_uncompressed': case01_message_hash_G2_uncompressed}, - outfile, - ) - yaml.dump( - {'case02_message_hash_G2_compressed': case02_message_hash_G2_compressed}, - outfile, - ) - yaml.dump( - {'case03_private_to_public_key': case03_private_to_public_key}, - outfile, - ) - yaml.dump({'case04_sign_messages': case04_sign_messages}, outfile) +# TODO +# Proof-of-possession - # Too time consuming to generate - # yaml.dump({'case05_verify_messages': case05_verify_messages}, outfile) - yaml.dump({'case06_aggregate_sigs': case06_aggregate_sigs}, outfile) - yaml.dump({'case07_aggregate_pubkeys': case07_aggregate_pubkeys}, outfile) + +def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("g2_uncompressed", "msg_hash", gen_suite.render_suite( + title="BLS G2 Uncompressed msg hash", + summary="BLS G2 Uncompressed msg hash", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="msg_hash", + test_cases=case01_message_hash_G2_uncompressed())) + + +def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("g2_compressed", "msg_hash", gen_suite.render_suite( + title="BLS G2 Compressed msg hash", + summary="BLS G2 Compressed msg hash", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="msg_hash", + test_cases=case02_message_hash_G2_compressed())) + + + +def bls_priv_to_pub_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("priv_to_pub", "priv_to_pub", gen_suite.render_suite( + title="BLS private key to pubkey", + summary="BLS Convert private key to public key", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="priv_to_pub", + test_cases=case03_private_to_public_key())) + + +def bls_sign_msg_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("sign_msg", "sign_msg", gen_suite.render_suite( + title="BLS sign msg", + summary="BLS Sign a message", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="sign_msg", + test_cases=case04_sign_messages())) + + +def bls_aggregate_sigs_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("aggregate_sigs", "aggregate_sigs", gen_suite.render_suite( + title="BLS aggregate sigs", + summary="BLS Aggregate signatures", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="aggregate_sigs", + test_cases=case06_aggregate_sigs())) + + +def bls_aggregate_pubkeys_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("aggregate_pubkeys", "aggregate_pubkeys", gen_suite.render_suite( + title="BLS aggregate pubkeys", + summary="BLS Aggregate public keys", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="aggregate_pubkeys", + test_cases=case07_aggregate_pubkeys())) + + +if __name__ == "__main__": + gen_runner.run_generator("shuffling", [ + bls_msg_hash_compressed_suite, + bls_msg_hash_uncompressed_suite, + bls_priv_to_pub_suite, + bls_sign_msg_suite, + bls_aggregate_sigs_suite, + bls_aggregate_pubkeys_suite + ]) diff --git a/test_generators/bls/requirements.txt b/test_generators/bls/requirements.txt index 3989a3a0f..8a933d41c 100644 --- a/test_generators/bls/requirements.txt +++ b/test_generators/bls/requirements.txt @@ -1,2 +1,3 @@ py-ecc==1.6.0 -PyYAML==4.2b1 +eth-utils==1.4.1 +../../test_libs/gen_helpers From 
a9054fb85d7a7798db4264471a39ba41db40ce79 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 11 Apr 2019 19:17:53 +1000 Subject: [PATCH 236/481] fix BLS msg hash handler naming --- test_generators/bls/main.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py index 0ffa0f66b..8b6949f9f 100644 --- a/test_generators/bls/main.py +++ b/test_generators/bls/main.py @@ -160,24 +160,24 @@ def case07_aggregate_pubkeys(): def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("g2_uncompressed", "msg_hash", gen_suite.render_suite( + return ("g2_uncompressed", "msg_hash_compressed", gen_suite.render_suite( title="BLS G2 Uncompressed msg hash", summary="BLS G2 Uncompressed msg hash", forks_timeline="mainnet", forks=["phase0"], config="mainnet", - handler="msg_hash", + handler="msg_hash_compressed", test_cases=case01_message_hash_G2_uncompressed())) def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("g2_compressed", "msg_hash", gen_suite.render_suite( + return ("g2_compressed", "msg_hash_uncompressed", gen_suite.render_suite( title="BLS G2 Compressed msg hash", summary="BLS G2 Compressed msg hash", forks_timeline="mainnet", forks=["phase0"], config="mainnet", - handler="msg_hash", + handler="msg_hash_uncompressed", test_cases=case02_message_hash_G2_compressed())) From 7e2b0a946fd2a6be08a8bb658cd2985f61007052 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 11 Apr 2019 19:18:53 +1000 Subject: [PATCH 237/481] fix naming mistake --- test_generators/bls/main.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py index 8b6949f9f..e5288c045 100644 --- a/test_generators/bls/main.py +++ b/test_generators/bls/main.py @@ -160,24 +160,24 @@ def case07_aggregate_pubkeys(): def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("g2_uncompressed", "msg_hash_compressed", gen_suite.render_suite( + return ("g2_uncompressed", "msg_hash_uncompressed", gen_suite.render_suite( title="BLS G2 Uncompressed msg hash", summary="BLS G2 Uncompressed msg hash", forks_timeline="mainnet", forks=["phase0"], config="mainnet", - handler="msg_hash_compressed", + handler="msg_hash_uncompressed", test_cases=case01_message_hash_G2_uncompressed())) def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("g2_compressed", "msg_hash_uncompressed", gen_suite.render_suite( + return ("g2_compressed", "msg_hash_compressed", gen_suite.render_suite( title="BLS G2 Compressed msg hash", summary="BLS G2 Compressed msg hash", forks_timeline="mainnet", forks=["phase0"], config="mainnet", - handler="msg_hash_uncompressed", + handler="msg_hash_compressed", test_cases=case02_message_hash_G2_compressed())) From f958adbff134c91d270f2a689c4981b41457d64c Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 11 Apr 2019 19:25:00 +1000 Subject: [PATCH 238/481] generator running fixes --- Makefile | 3 ++- test_generators/bls/main.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index bf23a1442..7ca2cc909 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec PY_TEST_DIR = ./py_tests YAML_TEST_DIR = ./yaml_tests GENERATOR_DIR = ./test_generators +CONFIGS_DIR = ./configs # Collect a list of generator names GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/))) 
@@ -57,7 +58,7 @@ define build_yaml_tests # 3) Install all the necessary requirements # 4) Run the generator. The generator is assumed to have an "main.py" file. # 5) We output to the tests dir (generator program should accept a "-o " argument. - cd $(GENERATOR_DIR)$(1); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) + cd $(GENERATOR_DIR)$(1); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR) $(info generator $(1) finished) endef diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py index e5288c045..1a0373068 100644 --- a/test_generators/bls/main.py +++ b/test_generators/bls/main.py @@ -227,7 +227,7 @@ def bls_aggregate_pubkeys_suite(configs_path: str) -> gen_typing.TestSuiteOutput if __name__ == "__main__": - gen_runner.run_generator("shuffling", [ + gen_runner.run_generator("bls", [ bls_msg_hash_compressed_suite, bls_msg_hash_uncompressed_suite, bls_priv_to_pub_suite, From 8c32128ffbda5c7e056c218cdb78ab76d856c5f5 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 11 Apr 2019 22:28:42 +1000 Subject: [PATCH 239/481] initial pass on genesis slot == 0 --- specs/core/0_beacon-chain.md | 20 ++++++++++++------- .../test_process_attester_slashing.py | 3 +++ tests/phase0/helpers.py | 13 ++++++++++-- 3 files changed, 27 insertions(+), 9 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 185b07443..ad7efc648 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -216,8 +216,8 @@ These configurations are updated for releases, but may be out of sync during `de | Name | Value | | - | - | | `GENESIS_FORK_VERSION` | `int_to_bytes4(0)` | -| `GENESIS_SLOT` | `2**32` | -| `GENESIS_EPOCH` | `slot_to_epoch(GENESIS_SLOT)` | +| `GENESIS_SLOT` | `0` | +| `GENESIS_EPOCH` | `0` | | `GENESIS_START_SHARD` | `0` | | `FAR_FUTURE_EPOCH` | `2**64 - 1` | | `ZERO_HASH` | `int_to_bytes32(0)` | @@ -1044,12 +1044,12 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: ```python def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: + attestation_data: AttestationData) -> List[ValidatorIndex]: """ Return the crosslink committee corresponding to ``attestation_data``. 
- """ + """ crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - + # Find the committee in the list with the desired shard assert attestation_data.shard in [shard for _, shard in crosslink_committees] crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] @@ -1554,7 +1554,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Finality previous_epoch_attestations=[], current_epoch_attestations=[], - previous_justified_epoch=GENESIS_EPOCH - 1, + previous_justified_epoch=GENESIS_EPOCH, current_justified_epoch=GENESIS_EPOCH, previous_justified_root=ZERO_HASH, current_justified_root=ZERO_HASH, @@ -1814,6 +1814,9 @@ Run the following function: ```python def update_justification_and_finalization(state: BeaconState) -> None: + if get_current_epoch(state) == GENESIS_EPOCH: + return + new_justified_epoch = state.current_justified_epoch new_finalized_epoch = state.finalized_epoch @@ -1864,7 +1867,7 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: current_epoch = get_current_epoch(state) - previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) + previous_epoch = current_epoch if current_epoch == GENESIS_EPOCH else get_previous_epoch(state) next_epoch = current_epoch + 1 for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): @@ -1999,6 +2002,9 @@ Run the following: ```python def apply_rewards(state: BeaconState) -> None: + if get_current_epoch(state) == GENESIS_EPOCH: + return + rewards1, penalties1 = get_justification_and_finalization_deltas(state) rewards2, penalties2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): diff --git a/tests/phase0/block_processing/test_process_attester_slashing.py b/tests/phase0/block_processing/test_process_attester_slashing.py index 06f214c4b..8b7334294 100644 --- a/tests/phase0/block_processing/test_process_attester_slashing.py +++ b/tests/phase0/block_processing/test_process_attester_slashing.py @@ -9,6 +9,7 @@ from build.phase0.spec import ( ) from tests.phase0.helpers import ( get_valid_attester_slashing, + next_epoch, ) # mark entire file as 'attester_slashing' @@ -59,6 +60,8 @@ def test_success_double(state): def test_success_surround(state): + next_epoch(state) + state.current_justified_epoch += 1 attester_slashing = get_valid_attester_slashing(state) # set attestion1 to surround attestation 2 diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 61f02ea8c..020e51831 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -4,6 +4,9 @@ from py_ecc import bls import build.phase0.spec as spec from build.phase0.utils.minimal_ssz import signing_root +from build.phase0.state_transition import ( + state_transition, +) from build.phase0.spec import ( # constants EMPTY_SIGNATURE, @@ -144,7 +147,7 @@ def build_attestation_data(state, slot, shard): if epoch_start_slot == slot: epoch_boundary_root = block_root else: - get_block_root(state, epoch_start_slot) + epoch_boundary_root = get_block_root(state, epoch_start_slot) if slot < epoch_start_slot: justified_block_root = state.previous_justified_root @@ -260,7 +263,7 @@ def get_valid_attester_slashing(state): def get_valid_attestation(state, slot=None): if slot is None: slot = state.slot - shard = state.latest_start_shard + shard = state.latest_start_shard + slot % spec.SLOTS_PER_EPOCH 
attestation_data = build_attestation_data(state, slot, shard) crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) @@ -312,3 +315,9 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) domain_type=spec.DOMAIN_ATTESTATION, ) ) + + +def next_epoch(state): + block = build_empty_block_for_next_slot(state) + block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + state_transition(state, block) From 6ca550489ed23c53d58e61c071236400d6ba433f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 11 Apr 2019 22:48:27 +1000 Subject: [PATCH 240/481] prevent underflow of previous epoch in all cases --- specs/core/0_beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ad7efc648..394b26ee1 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -702,7 +702,8 @@ def get_previous_epoch(state: BeaconState) -> Epoch: """` Return the previous epoch of the given ``state``. """ - return get_current_epoch(state) - 1 + current_epoch = get_current_epoch(state) + return (current_epoch - 1) if current_epoch > GENESIS_EPOCH else current_epoch ``` ### `get_current_epoch` @@ -2338,6 +2339,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Check target epoch, source epoch, and source root target_epoch = slot_to_epoch(attestation.data.slot) + assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), From 5824117cf9b3c606bd145f2fc98963f4fdcb3289 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 00:53:32 +1000 Subject: [PATCH 241/481] start to implement operations testing --- test_generators/operations/README.md | 13 ++ test_generators/operations/deposit.py | 173 ++++++++++++++++++++ test_generators/operations/genesis.py | 44 +++++ test_generators/operations/keys.py | 7 + test_generators/operations/main.py | 9 + test_generators/operations/requirements.txt | 5 + 6 files changed, 251 insertions(+) create mode 100644 test_generators/operations/README.md create mode 100644 test_generators/operations/deposit.py create mode 100644 test_generators/operations/genesis.py create mode 100644 test_generators/operations/keys.py create mode 100644 test_generators/operations/main.py create mode 100644 test_generators/operations/requirements.txt diff --git a/test_generators/operations/README.md b/test_generators/operations/README.md new file mode 100644 index 000000000..9f1ecfddb --- /dev/null +++ b/test_generators/operations/README.md @@ -0,0 +1,13 @@ +# Operations + +Operations (or "transactions" in previous spec iterations), + are atomic changes to the state, introduced by embedding in blocks. + +This generators provides a series of test suites, divided into handler, for each operation type. +A operation test-runner can consume these operation test-suites, + and handle different kinds of operations by processing the cases using the specified test handler. + +Information on the format of the tests can be found in the [operations test formats documentation](../../specs/test_formats/operations/README.md). 
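+
+As a rough illustration only (this runner is not part of the repo), a consumer of the
+ `deposits` suites could look like the sketch below. The YAML layout and the `decode`
+ helper (assumed inverse of the `encode` helper used by the generators) are assumptions;
+ only `spec.process_deposit` comes from the executable spec.
+
+```python
+import yaml
+
+from eth2spec.debug.encode import encode
+from eth2spec.phase0 import spec
+
+
+def run_deposit_case(case, decode):
+    # `decode` is an assumed inverse of `encode`, rebuilding SSZ objects from the YAML dicts.
+    state = decode(case['pre'], spec.BeaconState)
+    deposit = decode(case['deposit'], spec.Deposit)
+    if case.get('post') is None:
+        # Invalid case: deposit processing is expected to abort (assertion failure),
+        # leaving the pre-state unchanged.
+        rejected = False
+        try:
+            spec.process_deposit(state, deposit)
+        except AssertionError:
+            rejected = True
+        assert rejected, 'expected the deposit to be rejected'
+    else:
+        spec.process_deposit(state, deposit)
+        assert encode(state, spec.BeaconState) == case['post']
+
+
+def run_deposit_suite(path, decode):
+    with open(path) as f:
+        suite = yaml.safe_load(f)
+    for case in suite['test_cases']:  # 'test_cases' per the suite format; adjust if the layout differs
+        run_deposit_case(case, decode)
+```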
+ + + diff --git a/test_generators/operations/deposit.py b/test_generators/operations/deposit.py new file mode 100644 index 000000000..a72b1fbaa --- /dev/null +++ b/test_generators/operations/deposit.py @@ -0,0 +1,173 @@ +from eth2spec.phase0 import spec +from eth_utils import ( + to_dict, to_tuple +) +from gen_base import gen_suite, gen_typing +from preset_loader import loader +from eth2spec.debug.encode import encode +from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof + +from typing import List, Tuple + +import genesis +import keys +from py_ecc import bls + + +def build_deposit_data(state, + pubkey: spec.BLSPubkey, + withdrawal_cred: spec.Bytes32, + privkey: int, + amount: int): + deposit_data = spec.DepositData( + pubkey=pubkey, + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[1:], + amount=amount, + proof_of_possession=spec.EMPTY_SIGNATURE, + ) + deposit_data.proof_of_possession = bls.sign( + message_hash=signing_root(deposit_data), + privkey=privkey, + domain=spec.get_domain( + state.fork, + spec.get_current_epoch(state), + spec.DOMAIN_DEPOSIT, + ) + ) + return deposit_data + + +def build_deposit(state, + deposit_data_leaves: List[spec.Bytes32], + pubkey: spec.BLSPubkey, + withdrawal_cred: spec.Bytes32, + privkey: int, + amount: int) -> spec.Deposit: + + deposit_data = build_deposit_data(state, pubkey, withdrawal_cred, privkey, amount) + + item = spec.hash(deposit_data.serialize()) + index = len(deposit_data_leaves) + deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + proof = list(get_merkle_proof(tree, item_index=index)) + + deposit = spec.Deposit( + proof=list(proof), + index=index, + data=deposit_data, + ) + + return deposit + + +def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[spec.Deposit, spec.BeaconState, List[spec.Bytes32]]: + genesis_deposits = genesis.create_deposits( + keys.pubkeys[:initial_validator_count], + keys.withdrawal_creds[:initial_validator_count] + ) + state = genesis.create_genesis_state(genesis_deposits) + + deposit_data_leaves = [spec.hash(dep.data.serialize()) for dep in genesis_deposits] + + deposit = build_deposit( + state, + deposit_data_leaves, + keys.pubkeys[index], + keys.withdrawal_creds[index], + keys.privkeys[index], + spec.MAX_DEPOSIT_AMOUNT, + ) + + state.latest_eth1_data.deposit_root = get_merkle_root(tuple(deposit_data_leaves)) + state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + return deposit, state, deposit_data_leaves + + +@to_dict +def valid_deposit(): + new_dep, state, leaves = build_deposit_for_index(10, 10) + state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) + state.latest_eth1_data.deposit_count = len(leaves) + yield 'case', 'valid deposit to add new validator' + yield 'pre', encode(state, spec.BeaconState) + yield 'deposit', encode(new_dep, spec.Deposit) + spec.process_deposit(state, new_dep) + yield 'post', encode(state, spec.BeaconState) + + +@to_dict +def valid_topup(): + new_dep, state, leaves = build_deposit_for_index(10, 3) + state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) + state.latest_eth1_data.deposit_count = len(leaves) + yield 'case', 'valid deposit to top-up existing validator' + yield 'pre', encode(state, spec.BeaconState) + yield 'deposit', encode(new_dep, spec.Deposit) + spec.process_deposit(state, new_dep) + yield 'post', encode(state, spec.BeaconState) 
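+
+
+# Note: the invalid cases below deliberately leave 'post' as None. A consuming
+# test-runner should read an absent/None 'post' as: processing this deposit is
+# expected to abort (fail an assertion) and leave the pre-state unchanged.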
+ + +@to_dict +def invalid_deposit_index(): + new_dep, state, leaves = build_deposit_for_index(10, 10) + state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) + state.latest_eth1_data.deposit_count = len(leaves) + # Mess up deposit index, 1 too small + state.deposit_index = 9 + + yield 'case', 'invalid deposit index' + yield 'pre', encode(state, spec.BeaconState) + yield 'deposit', encode(new_dep, spec.Deposit) + yield 'post', None + +@to_dict +def invalid_deposit_proof(): + new_dep, state, leaves = build_deposit_for_index(10, 10) + state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) + state.latest_eth1_data.deposit_count = len(leaves) + # Make deposit proof invalid (at bottom of proof) + new_dep.proof[-1] = spec.ZERO_HASH + + yield 'case', 'invalid deposit proof' + yield 'pre', encode(state, spec.BeaconState) + yield 'deposit', encode(new_dep, spec.Deposit) + yield 'post', None + + +@to_tuple +def deposit_cases(): + yield valid_deposit() + yield valid_topup() + yield invalid_deposit_index() + yield invalid_deposit_proof() + + +def mini_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + presets = loader.load_presets(configs_path, 'minimal') + spec.apply_constants_preset(presets) + + return ("deposit_minimal", "deposits", gen_suite.render_suite( + title="deposit operation", + summary="Test suite for deposit type operation processing", + forks_timeline="testing", + forks=["phase0"], + config="minimal", + handler="core", + test_cases=deposit_cases())) + + +def full_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + presets = loader.load_presets(configs_path, 'mainnet') + spec.apply_constants_preset(presets) + + return ("deposit_full", "deposits", gen_suite.render_suite( + title="deposit operation", + summary="Test suite for deposit type operation processing", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + handler="core", + test_cases=deposit_cases())) diff --git a/test_generators/operations/genesis.py b/test_generators/operations/genesis.py new file mode 100644 index 000000000..7e0146f67 --- /dev/null +++ b/test_generators/operations/genesis.py @@ -0,0 +1,44 @@ +from eth2spec.phase0 import spec +from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof +from typing import List + + +def create_genesis_state(deposits: List[spec.Deposit]) -> spec.BeaconState: + deposit_root = get_merkle_root((tuple([spec.hash(dep.data.serialize()) for dep in deposits]))) + + return spec.get_genesis_beacon_state( + deposits, + genesis_time=0, + genesis_eth1_data=spec.Eth1Data( + deposit_root=deposit_root, + deposit_count=len(deposits), + block_hash=spec.ZERO_HASH, + ), + ) + + +def create_deposits(pubkeys: List[spec.BLSPubkey], withdrawal_cred: List[spec.Bytes32]) -> List[spec.Deposit]: + + # Mock proof of possession + proof_of_possession = b'\x33' * 96 + + deposit_data = [ + spec.DepositData( + pubkey=pubkeys[i], + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[i][1:], + amount=spec.MAX_DEPOSIT_AMOUNT, + proof_of_possession=proof_of_possession, + ) for i in range(len(pubkeys)) + ] + + # Fill tree with existing deposits + deposit_data_leaves = [spec.hash(data.serialize()) for data in deposit_data] + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + + return [ + spec.Deposit( + proof=list(get_merkle_proof(tree, item_index=i)), + index=i, + data=deposit_data[i] + ) for i in range(len(deposit_data)) + ] diff --git 
a/test_generators/operations/keys.py b/test_generators/operations/keys.py new file mode 100644 index 000000000..db4f59e0e --- /dev/null +++ b/test_generators/operations/keys.py @@ -0,0 +1,7 @@ +from py_ecc import bls +from eth2spec.phase0.spec import hash + +privkeys = list(range(1, 101)) +pubkeys = [bls.privtopub(k) for k in privkeys] +# Insecure, but easier to follow +withdrawal_creds = [hash(bls.privtopub(k)) for k in privkeys] diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py new file mode 100644 index 000000000..dd934c758 --- /dev/null +++ b/test_generators/operations/main.py @@ -0,0 +1,9 @@ +from gen_base import gen_runner + +from deposit import mini_deposits_suite, full_deposits_suite + +if __name__ == "__main__": + gen_runner.run_generator("operations", [ + mini_deposits_suite, + full_deposits_suite + ]) diff --git a/test_generators/operations/requirements.txt b/test_generators/operations/requirements.txt new file mode 100644 index 000000000..dfe853536 --- /dev/null +++ b/test_generators/operations/requirements.txt @@ -0,0 +1,5 @@ +eth-utils==1.4.1 +../../test_libs/gen_helpers +../../test_libs/config_helpers +../../test_libs/pyspec +py_ecc \ No newline at end of file From 7ffcdcfd7c6ac131c00a67e8d7f3b6850750f700 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 09:12:37 +1000 Subject: [PATCH 242/481] bitfield length limit style improvement --- specs/core/0_beacon-chain.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c3161c8c0..9ff2c29cb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1817,10 +1817,8 @@ def update_justification_and_finalization(state: BeaconState) -> None: new_justified_epoch = state.current_justified_epoch new_finalized_epoch = state.finalized_epoch - # Rotate the justification bitfield up one epoch to make room for the current epoch - state.justification_bitfield <<= 1 - # Python var length integers: justification bitfield is 64 bits, and may not be bigger (for SSZ serialization) - state.justification_bitfield &= (1 << 64) - 1 + # Rotate the justification bitfield up one epoch to make room for the current epoch (and limit to 64 bits) + state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 # If the previous epoch gets justified, fill the second last bit previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: From 5e8172aaa70c8f1ffd4c1b58b35d73f27bf4b6af Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Thu, 11 Apr 2019 22:09:31 -0500 Subject: [PATCH 243/481] Update README.md (#897) --- specs/test_formats/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 371c489b6..2c1ef2d03 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -7,7 +7,7 @@ This document defines the YAML format and structure used for ETH 2.0 testing. 
* [About](#about) * [Glossary](#glossary) * [Test format philosophy](#test-format-philosophy) -* [Test Suite](#yaml-suite) +* [Test Suite](#test-suite) * [Config](#config) * [Fork-timeline](#fork-timeline) * [Config sourcing](#config-sourcing) @@ -28,7 +28,7 @@ The particular formats of specific types of tests (test suites) are defined in s - `suite`: a YAML file with: - a header: describes the `suite`, and defines what the `suite` is for - a list of test cases -- `runner`: where a generator is a *"producer"*, this is the *"consumer"**. +- `runner`: where a generator is a *"producer"*, this is the *"consumer"*. - A `runner` focuses on *only one* `type`, and each type has *only one* `runner`. - `handler`: a `runner` may be too limited sometimes, you may have a `suite` with a specific focus that requires a different format. To facilitate this, you specify a `handler`: the runner can deal with the format by using the specified handler. From 47b9cdbb367b5227eb8a17ea625accec9bbc2f2f Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 12 Apr 2019 13:18:10 +1000 Subject: [PATCH 244/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 394b26ee1..0cf1a59c0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -224,8 +224,6 @@ These configurations are updated for releases, but may be out of sync during `de | `EMPTY_SIGNATURE` | `int_to_bytes96(0)` | | `BLS_WITHDRAWAL_PREFIX_BYTE` | `int_to_bytes1(0)` | -* `GENESIS_SLOT` should be at least as large in terms of time as the largest of the time parameters or state list lengths below (ie. it should be at least as large as any value measured in slots, and at least `SLOTS_PER_EPOCH` times as large as any value measured in epochs). - ### Time parameters | Name | Value | Unit | Duration | @@ -1608,7 +1606,7 @@ For a beacon chain block, `block`, to be processed by a node, the following cond * The parent block with root `block.previous_block_root` has been processed and accepted. * An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted. -* The node's Unix time is greater than or equal to `state.genesis_time + (block.slot - GENESIS_SLOT) * SECONDS_PER_SLOT`. (Note that leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year.) +* The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`. (Note that leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year.) If these conditions are not met, the client should delay processing the beacon block until the conditions are all satisfied. @@ -2334,8 +2332,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: Process ``Attestation`` operation. Note that this function mutates ``state``. 
""" - assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY + assert attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation.data.slot + SLOTS_PER_EPOCH # Check target epoch, source epoch, and source root target_epoch = slot_to_epoch(attestation.data.slot) From e9a44545be522f7fa55d73840fa582df6d739226 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 12 Apr 2019 13:38:58 +1000 Subject: [PATCH 245/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0cf1a59c0..07dc5be6f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1366,14 +1366,16 @@ Note: All functions in this section mutate `state`. #### `activate_validator` ```python -def activate_validator(state: BeaconState, index: ValidatorIndex, is_genesis: bool) -> None: +def activate_validator(state: BeaconState, index: ValidatorIndex) -> None: """ Activate the validator of the given ``index``. Note that this function mutates ``state``. """ validator = state.validator_registry[index] - - validator.activation_epoch = GENESIS_EPOCH if is_genesis else get_delayed_activation_exit_epoch(get_current_epoch(state)) + if state.slot == GENESIS_SLOT: + validator.activation_epoch = GENESIS_EPOCH + else: + validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) ``` #### `initiate_validator_exit` @@ -1583,7 +1585,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Process genesis activations for validator_index, _ in enumerate(state.validator_registry): if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: - activate_validator(state, validator_index, is_genesis=True) + activate_validator(state, validator_index) genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): @@ -1865,9 +1867,8 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - previous_epoch = current_epoch if current_epoch == GENESIS_EPOCH else get_previous_epoch(state) - next_epoch = current_epoch + 1 + previous_epoch = get_previous_epoch(state) + next_epoch = get_current_epoch(state) + 1 for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_root, participants = get_winning_root_and_participants(state, shard) @@ -2062,7 +2063,7 @@ def update_validator_registry(state: BeaconState) -> None: break # Activate validator - activate_validator(state, index, is_genesis=False) + activate_validator(state, index) # Exit validators within the allowable balance churn if current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: From 38b6e71bd573f71134f69c57db43269c09ad87b7 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Fri, 12 Apr 2019 15:06:23 +1000 Subject: [PATCH 246/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 590f42af3..32c79acfe 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2464,7 
+2464,7 @@ This section is divided into Normative and Informative references. Normative re ## Informative _**casper-ffg**_
  _Casper the Friendly Finality Gadget_. V. Buterin and V. Griffith. URL: https://arxiv.org/abs/1710.09437 - _**python-poc**_
  _Python proof-of-concept implementation_. Ethereum Foundation. URL: https://github.com/ethereum/beacon_chain + _**python-poc**_
  _Python proof-of-concept implementation_. Ethereum Foundation. URL: https://github.com/ethereum/trinity/tree/master/eth2 # Copyright Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). From b966041ddb6673d5398cae1b06ae67f646f7ae45 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 12 Apr 2019 18:56:55 +1000 Subject: [PATCH 247/481] formatting fix (#899) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 32c79acfe..0e68c192b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1039,7 +1039,7 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: ```python def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: + attestation_data: AttestationData) -> List[ValidatorIndex]: """ Return the crosslink committee corresponding to ``attestation_data``. """ From c954eab96d389c3536f3dc5c3b1dafb397dd22d9 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 12 Apr 2019 18:57:55 +1000 Subject: [PATCH 248/481] Move the ordering of the definition of `process_deposit`. This move matches the symmetry of the other operation processing definitions. --- specs/core/0_beacon-chain.md | 141 +++++++++++++++++------------------ 1 file changed, 70 insertions(+), 71 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 590f42af3..7d3e70288 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -96,7 +96,6 @@ - [`bls_verify`](#bls_verify) - [`bls_verify_multiple`](#bls_verify_multiple) - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) - - [`process_deposit`](#process_deposit) - [Routines for updating validator status](#routines-for-updating-validator-status) - [`activate_validator`](#activate_validator) - [`initiate_validator_exit`](#initiate_validator_exit) @@ -1285,75 +1284,6 @@ def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: `bls_aggregate_pubkeys` is a function for aggregating multiple BLS public keys into a single aggregate key, defined in the [BLS Signature spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#bls_aggregate_pubkeys). -### `process_deposit` - -Used to add a [validator](#dfn-validator) or top up an existing [validator](#dfn-validator)'s balance by some `deposit` amount: - -```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - """ - Process a deposit from Ethereum 1.0. - Note that this function mutates ``state``. - """ - # Deposits must be processed in order - assert deposit.index == state.deposit_index - - # Verify the Merkle branch - merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization - proof=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH, - index=deposit.index, - root=state.latest_eth1_data.deposit_root, - ) - assert merkle_branch_is_valid - - # Increment the next deposit index we are expecting. 
Note that this - # needs to be done here because while the deposit contract will never - # create an invalid Merkle branch, it may admit an invalid deposit - # object, and we need to be able to skip over it - state.deposit_index += 1 - - validator_pubkeys = [v.pubkey for v in state.validator_registry] - pubkey = deposit.data.pubkey - amount = deposit.data.amount - - if pubkey not in validator_pubkeys: - # Verify the proof of possession - proof_is_valid = bls_verify( - pubkey=pubkey, - message_hash=signing_root(deposit.data), - signature=deposit.data.proof_of_possession, - domain=get_domain( - state.fork, - get_current_epoch(state), - DOMAIN_DEPOSIT, - ) - ) - if not proof_is_valid: - return - - # Add new validator - validator = Validator( - pubkey=pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - initiated_exit=False, - slashed=False, - high_balance=0 - ) - - # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. - state.validator_registry.append(validator) - state.balances.append(0) - set_balance(state, len(state.validator_registry) - 1, amount) - else: - # Increase balance by deposit amount - index = validator_pubkeys.index(pubkey) - increase_balance(state, index, amount) -``` ### Routines for updating validator status @@ -2363,7 +2293,76 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. -For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`. +For each `deposit` in `block.body.deposits`, run the following function: + +```python +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + """ + Process a deposit from Ethereum 1.0. + Used to add a validator or top up an existing validator's + balance by some ``deposit`` amount. + + Note that this function mutates ``state``. + """ + # Deposits must be processed in order + assert deposit.index == state.deposit_index + + # Verify the Merkle branch + merkle_branch_is_valid = verify_merkle_branch( + leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization + proof=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH, + index=deposit.index, + root=state.latest_eth1_data.deposit_root, + ) + assert merkle_branch_is_valid + + # Increment the next deposit index we are expecting. 
Note that this + # needs to be done here because while the deposit contract will never + # create an invalid Merkle branch, it may admit an invalid deposit + # object, and we need to be able to skip over it + state.deposit_index += 1 + + validator_pubkeys = [v.pubkey for v in state.validator_registry] + pubkey = deposit.data.pubkey + amount = deposit.data.amount + + if pubkey not in validator_pubkeys: + # Verify the proof of possession + proof_is_valid = bls_verify( + pubkey=pubkey, + message_hash=signing_root(deposit.data), + signature=deposit.data.proof_of_possession, + domain=get_domain( + state.fork, + get_current_epoch(state), + DOMAIN_DEPOSIT, + ) + ) + if not proof_is_valid: + return + + # Add new validator + validator = Validator( + pubkey=pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + initiated_exit=False, + slashed=False, + high_balance=0 + ) + + # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. + state.validator_registry.append(validator) + state.balances.append(0) + set_balance(state, len(state.validator_registry) - 1, amount) + else: + # Increase balance by deposit amount + index = validator_pubkeys.index(pubkey) + increase_balance(state, index, amount) +``` ##### Voluntary exits From 4824b34df3a5bfbf957a42ba0590ec85d1fb5e2a Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 12 Apr 2019 19:02:16 +1000 Subject: [PATCH 249/481] Avoid materializing the merged list. Fixes #901. --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 590f42af3..72aff54f4 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1194,7 +1194,7 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1] return False - total_attesting_indices = len(custody_bit_0_indices + custody_bit_1_indices) + total_attesting_indices = len(custody_bit_0_indices) + len(custody_bit_1_indices) if not (1 <= total_attesting_indices <= MAX_ATTESTATION_PARTICIPANTS): return False From 1e32661c4fed06d4d8819b2c0577191d978ab59d Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 12 Apr 2019 19:54:33 +1000 Subject: [PATCH 250/481] Remove references and copyright Licensing is done at the repo level. The listed references are awkwardly incomplete (only the Python implementation was listed when we have ~10 implementations, and only the Casper paper was listed where we could add many ethresear.ch posts). --- specs/core/0_beacon-chain.md | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0717cd707..d5e978afb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -139,10 +139,6 @@ - [Voluntary exits](#voluntary-exits) - [Transfers](#transfers) - [State root verification](#state-root-verification) -- [References](#references) - - [Normative](#normative) - - [Informative](#informative) -- [Copyright](#copyright) @@ -171,7 +167,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. * **Crosslink** - a set of signatures from a committee attesting to a block in a shard chain that can be included into the beacon chain. 
Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. * **Slot** - a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations. * **Epoch** - an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation. -* **Finalized**, **justified** - see Casper FFG finalization [[casper-ffg]](#ref-casper-ffg). +* **Finalized**, **justified** - see the [Casper FFG paper](https://arxiv.org/abs/1710.09437). * **Withdrawal period** - the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable. * **Genesis time** - the Unix time of the genesis beacon chain block at slot 0. @@ -2453,17 +2449,3 @@ Verify the block's `state_root` by running the following function: def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: assert block.state_root == hash_tree_root(state) ``` - -# References - -This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely helpful information. An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. - -## Normative - -## Informative - _**casper-ffg**_
  _Casper the Friendly Finality Gadget_. V. Buterin and V. Griffith. URL: https://arxiv.org/abs/1710.09437 - - _**python-poc**_
  _Python proof-of-concept implementation_. Ethereum Foundation. URL: https://github.com/ethereum/trinity/tree/master/eth2 - -# Copyright -Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). From 3c55854072465787ccdd64d31a251553710fa5cb Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 12 Apr 2019 21:16:34 +1000 Subject: [PATCH 251/481] clean up --- specs/core/0_beacon-chain.md | 5 ++--- tests/phase0/conftest.py | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 07dc5be6f..a45e0565b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1867,9 +1867,9 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: - previous_epoch = get_previous_epoch(state) + start_epoch = get_previous_epoch(state) next_epoch = get_current_epoch(state) + 1 - for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): + for slot in range(get_epoch_start_slot(start_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_root, participants = get_winning_root_and_participants(state, shard) participating_balance = get_total_balance(state, participants) @@ -2337,7 +2337,6 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Check target epoch, source epoch, and source root target_epoch = slot_to_epoch(attestation.data.slot) - assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), diff --git a/tests/phase0/conftest.py b/tests/phase0/conftest.py index 36a087941..c0461cb67 100644 --- a/tests/phase0/conftest.py +++ b/tests/phase0/conftest.py @@ -14,7 +14,6 @@ MINIMAL_CONFIG = { "MIN_ATTESTATION_INCLUSION_DELAY": 2, "TARGET_COMMITTEE_SIZE": 4, "SLOTS_PER_EPOCH": 8, - "GENESIS_EPOCH": spec.GENESIS_SLOT // 8, "SLOTS_PER_HISTORICAL_ROOT": 64, "LATEST_RANDAO_MIXES_LENGTH": 64, "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, From 7ca20d71cac78c6a6084c9c8e184dde223dab5d7 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 22:15:30 +1000 Subject: [PATCH 252/481] minor naming tweaks, document BLS and deposit test formats --- specs/test_formats/README.md | 8 +++++- specs/test_formats/bls/README.md | 15 +++++++++++ specs/test_formats/bls/aggregate_pubkeys.md | 21 +++++++++++++++ specs/test_formats/bls/aggregate_sigs.md | 21 +++++++++++++++ .../bls/msg_hash_g2_compressed.md | 23 ++++++++++++++++ .../bls/msg_hash_g2_uncompressed.md | 23 ++++++++++++++++ specs/test_formats/bls/priv_to_pub.md | 21 +++++++++++++++ specs/test_formats/bls/sign_msg.md | 24 +++++++++++++++++ specs/test_formats/operations/README.md | 4 +++ specs/test_formats/operations/deposits.md | 26 +++++++++++++++++++ specs/test_formats/shuffling/README.md | 0 specs/test_formats/ssz/README.md | 0 test_generators/bls/main.py | 4 +-- .../operations/{deposit.py => deposits.py} | 0 test_generators/operations/main.py | 2 +- 15 files changed, 188 insertions(+), 4 deletions(-) create mode 100644 specs/test_formats/bls/README.md create mode 100644 specs/test_formats/bls/aggregate_pubkeys.md create mode 100644 specs/test_formats/bls/aggregate_sigs.md create mode 100644 specs/test_formats/bls/msg_hash_g2_compressed.md create mode 100644 
specs/test_formats/bls/msg_hash_g2_uncompressed.md create mode 100644 specs/test_formats/bls/priv_to_pub.md create mode 100644 specs/test_formats/bls/sign_msg.md create mode 100644 specs/test_formats/operations/README.md create mode 100644 specs/test_formats/operations/deposits.md create mode 100644 specs/test_formats/shuffling/README.md create mode 100644 specs/test_formats/ssz/README.md rename test_generators/operations/{deposit.py => deposits.py} (100%) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index f5d78193d..f64b93fa3 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -75,7 +75,13 @@ The second is still somewhat ambiguous: some tests may want cover multiple forks There is a common factor here however: the options are exclusive, and give a clear idea on what test suites need to be ran to cover testing for a specific fork. The way this list of forks is interpreted, is up to the test-runner: State-transition test suites may want to just declare forks that are being covered in the test suite, - whereas shuffling test suites may want to declare a list of forks to test the shuffling algorithm for individually. + whereas shuffling test suites may want to declare a list of forks to test the shuffling algorithm for individually. + +Test-formats specify the following `forks` interpretation rules: + +- `collective`: the test suite applies to all specified forks, and only needs to run once +- `individual`: the test suite should be ran against every fork +- more types may be specified with future test types. ### Test completeness diff --git a/specs/test_formats/bls/README.md b/specs/test_formats/bls/README.md new file mode 100644 index 000000000..700686bdd --- /dev/null +++ b/specs/test_formats/bls/README.md @@ -0,0 +1,15 @@ +# BLS test suite + +A test suite for BLS. Primarily geared towards verifying the *integration* of any BLS library. +We do not recommend to roll your own crypto, or use an untested BLS library. + +The BLS test suite runner has the following handlers: + +- [`aggregate_pubkeys`](./aggregate_pubkeys.md) +- [`aggregate_sigs`](./aggregate_sigs.md) +- [`msg_hash_g2_compressed`](./msg_hash_g2_compressed.md) +- [`msg_hash_g2_uncompressed`](./msg_hash_g2_uncompressed.md) +- [`priv_to_pub`](./priv_to_pub.md) +- [`sign_msg`](./sign_msg.md) + +Note: signature-verification and aggregate-verify test cases are not yet supported. diff --git a/specs/test_formats/bls/aggregate_pubkeys.md b/specs/test_formats/bls/aggregate_pubkeys.md new file mode 100644 index 000000000..9a6f1cc25 --- /dev/null +++ b/specs/test_formats/bls/aggregate_pubkeys.md @@ -0,0 +1,21 @@ +# Test format: BLS pubkey aggregation + +A BLS pubkey aggregation combines a series of pubkeys into a single pubkey. + +## Test case format + +```yaml +input: List[BLS Pubkey] -- list of input BLS pubkeys +output: BLS Pubkey -- expected output, single BLS pubkey +``` + +`BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`. + + +## Condition + +The `aggregate_pubkeys` handler should aggregate the keys in the `input`, and the result should match the expected `output`. 
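+
+For illustration, a checker built on `py_ecc` (the library used by the test generator) could
+look roughly like the sketch below; loading the case from YAML is assumed to have happened already.
+
+```python
+from py_ecc import bls
+
+
+def check_aggregate_pubkeys_case(case):
+    # Inputs and output are '0x'-prefixed hex strings, per the format above.
+    pubkeys = [bytes.fromhex(p[2:]) for p in case['input']]
+    result = bls.aggregate_pubkeys(pubkeys)
+    assert '0x' + result.hex() == case['output']
+```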
+ +## Forks + +Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/aggregate_sigs.md b/specs/test_formats/bls/aggregate_sigs.md new file mode 100644 index 000000000..1588e26cb --- /dev/null +++ b/specs/test_formats/bls/aggregate_sigs.md @@ -0,0 +1,21 @@ +# Test format: BLS signature aggregation + +A BLS signature aggregation combines a series of signatures into a single signature. + +## Test case format + +```yaml +input: List[BLS Signature] -- list of input BLS signatures +output: BLS Signature -- expected output, single BLS signature +``` + +`BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`. + + +## Condition + +The `aggregate_sigs` handler should aggregate the signatures in the `input`, and the result should match the expected `output`. + +## Forks + +Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/msg_hash_g2_compressed.md b/specs/test_formats/bls/msg_hash_g2_compressed.md new file mode 100644 index 000000000..51c64e28b --- /dev/null +++ b/specs/test_formats/bls/msg_hash_g2_compressed.md @@ -0,0 +1,23 @@ +# Test format: BLS hash-compressed + +A BLS compressed-hash to G2. + +## Test case format + +```yaml +input: + message: bytes32, + domain: bytes -- any number +output: List[bytes48] -- length of two +``` + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` + + +## Condition + +The `msg_hash_g2_compressed` handler should hash the `message`, with the given `domain`, to G2 with compression, and the result should match the expected `output`. + +## Forks + +Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/msg_hash_g2_uncompressed.md b/specs/test_formats/bls/msg_hash_g2_uncompressed.md new file mode 100644 index 000000000..b7d2caa02 --- /dev/null +++ b/specs/test_formats/bls/msg_hash_g2_uncompressed.md @@ -0,0 +1,23 @@ +# Test format: BLS hash-uncompressed + +A BLS uncompressed-hash to G2. + +## Test case format + +```yaml +input: + message: bytes32, + domain: bytes -- any number +output: List[List[bytes48]] -- 3 lists, each a length of two +``` + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` + + +## Condition + +The `msg_hash_g2_uncompressed` handler should hash the `message`, with the given `domain`, to G2, without compression, and the result should match the expected `output`. + +## Forks + +Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/priv_to_pub.md b/specs/test_formats/bls/priv_to_pub.md new file mode 100644 index 000000000..9265b83ed --- /dev/null +++ b/specs/test_formats/bls/priv_to_pub.md @@ -0,0 +1,21 @@ +# Test format: BLS private key to pubkey + +A BLS private key to public key conversion. + +## Test case format + +```yaml +input: bytes32 -- the private key +output: bytes48 -- the public key +``` + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` + + +## Condition + +The `priv_to_pub` handler should compute the public key for the given private key `input`, and the result should match the expected `output`. + +## Forks + +Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/sign_msg.md b/specs/test_formats/bls/sign_msg.md new file mode 100644 index 000000000..3a6d63fa2 --- /dev/null +++ b/specs/test_formats/bls/sign_msg.md @@ -0,0 +1,24 @@ +# Test format: BLS sign message + +Message signing with BLS should produce a signature. 
+ +## Test case format + +```yaml +input: + privkey: bytes32 -- the private key used for signing + message: bytes32 -- input message to sign (a hash) + domain: bytes -- BLS domain +output: bytes96 -- expected signature +``` + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` + + +## Condition + +The `sign_msg` handler should sign the given `message`, with `domain`, using the given `privkey`, and the result should match the expected `output`. + +## Forks + +Forks-interpretation: `collective` diff --git a/specs/test_formats/operations/README.md b/specs/test_formats/operations/README.md new file mode 100644 index 000000000..747d8217c --- /dev/null +++ b/specs/test_formats/operations/README.md @@ -0,0 +1,4 @@ +# Operations test suite + +The operations test suite + diff --git a/specs/test_formats/operations/deposits.md b/specs/test_formats/operations/deposits.md new file mode 100644 index 000000000..5aaed24f7 --- /dev/null +++ b/specs/test_formats/operations/deposits.md @@ -0,0 +1,26 @@ +# Test format: Deposit operations + +A deposit is a form of an operation (or "transaction"), modifying the state. + +## Test case format + +```yaml +case: string -- description of test case, purely for debugging purposes +pre: BeaconState -- state before applying the deposit +deposit: Deposit -- the deposit +post: BeaconState -- state after applying the deposit. No value if deposit processing is aborted. +``` + +## Condition + +A `deposits` handler of the `operations` should process these cases, + calling the implementation of the `process_deposit(state, deposit)` functionality described in the spec. +The resulting state should match the expected `post` state, or no change if the `post` state is left blank. + +## Forks + +Forks-interpretation: `collective` + +Pre and post state contain slot numbers, and are time sensitive. 
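As a purely illustrative sketch of such a handler (assumptions of this sketch: a `spec` module exposing `process_deposit`, YAML-decoded `pre`/`deposit`/`post` objects, and state equality checking left to the implementation, e.g. by hash tree root):

```python
from copy import deepcopy

def run_deposit_case(spec, case: dict) -> None:
    state = deepcopy(case["pre"])
    try:
        spec.process_deposit(state, case["deposit"])
    except AssertionError:
        # processing aborted: the test case should not specify a post state
        assert case.get("post") is None
        return
    assert state == case["post"]
```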
+Additional tests will be added for future forks to cover fork-specific behavior based on input data + (including suites with deposits on fork transition blocks, covering multiple forks) diff --git a/specs/test_formats/shuffling/README.md b/specs/test_formats/shuffling/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/specs/test_formats/ssz/README.md b/specs/test_formats/ssz/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py index 1a0373068..da6a79aae 100644 --- a/test_generators/bls/main.py +++ b/test_generators/bls/main.py @@ -160,7 +160,7 @@ def case07_aggregate_pubkeys(): def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("g2_uncompressed", "msg_hash_uncompressed", gen_suite.render_suite( + return ("g2_uncompressed", "msg_hash_g2_uncompressed", gen_suite.render_suite( title="BLS G2 Uncompressed msg hash", summary="BLS G2 Uncompressed msg hash", forks_timeline="mainnet", @@ -171,7 +171,7 @@ def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOu def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("g2_compressed", "msg_hash_compressed", gen_suite.render_suite( + return ("g2_compressed", "msg_hash_g2_compressed", gen_suite.render_suite( title="BLS G2 Compressed msg hash", summary="BLS G2 Compressed msg hash", forks_timeline="mainnet", diff --git a/test_generators/operations/deposit.py b/test_generators/operations/deposits.py similarity index 100% rename from test_generators/operations/deposit.py rename to test_generators/operations/deposits.py diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py index dd934c758..8b0a2a6d8 100644 --- a/test_generators/operations/main.py +++ b/test_generators/operations/main.py @@ -1,6 +1,6 @@ from gen_base import gen_runner -from deposit import mini_deposits_suite, full_deposits_suite +from deposits import mini_deposits_suite, full_deposits_suite if __name__ == "__main__": gen_runner.run_generator("operations", [ From fcb7e3437e9902e9443bd2f1ce113e2df8d3b864 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 22:19:10 +1000 Subject: [PATCH 253/481] test format doc tweaks --- specs/test_formats/bls/README.md | 4 ++-- specs/test_formats/operations/README.md | 10 ++++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/specs/test_formats/bls/README.md b/specs/test_formats/bls/README.md index 700686bdd..db63bba1d 100644 --- a/specs/test_formats/bls/README.md +++ b/specs/test_formats/bls/README.md @@ -1,6 +1,6 @@ -# BLS test suite +# BLS tests -A test suite for BLS. Primarily geared towards verifying the *integration* of any BLS library. +A test type for BLS. Primarily geared towards verifying the *integration* of any BLS library. We do not recommend to roll your own crypto, or use an untested BLS library. The BLS test suite runner has the following handlers: diff --git a/specs/test_formats/operations/README.md b/specs/test_formats/operations/README.md index 747d8217c..842dc3615 100644 --- a/specs/test_formats/operations/README.md +++ b/specs/test_formats/operations/README.md @@ -1,4 +1,10 @@ -# Operations test suite +# Operations tests + +The different kinds of operations ("transactions") are tested individually with test handlers. + +The tested operation kinds are: +- [`deposits`](./deposits.md) +- More tests are work-in-progress. 
+ -The operations test suite From 0aa1cc8f6094b1e7f6ca6d0391bcbce3575950e7 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 22:30:19 +1000 Subject: [PATCH 254/481] update shuffling test docs --- specs/test_formats/shuffling/README.md | 36 ++++++++++++++++++++++++++ test_generators/shuffling/README.md | 6 ----- 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/specs/test_formats/shuffling/README.md b/specs/test_formats/shuffling/README.md index e69de29bb..efc1b7b1a 100644 --- a/specs/test_formats/shuffling/README.md +++ b/specs/test_formats/shuffling/README.md @@ -0,0 +1,36 @@ +# Test format: shuffling + +The runner of the Shuffling test type has only one handler: `core` + +This does not mean however that testing is limited. +Clients may take different approaches to shuffling, for optimizing, + and supporting advanced lookup behavior back in older history. + +For implementers, possible test runners implementing testing can include: +1) just test permute-index, run it for each index `i` in `range(count)`, and check against expected `output[i]` (default spec implementation) +2) test un-permute-index (the reverse lookup. Implemented by running the shuffling rounds in reverse: from `round_count-1` to `0`) +3) test the optimized complete shuffle, where all indices are shuffled at once, test output in one go. +4) test complete shuffle in reverse (reverse rounds, same as 2) + +## Test case format + +```yaml +seed: bytes32 +count: int +shuffled: List[int] +``` + +- The `bytes32` is encoded as strings, hexadecimal encoding, prefixed with `0x`. +- Integers are validator indices. These are `uint64`, but realistically they are not as big. + +The `count` specifies the validator registry size. One should compute the shuffling for indices `0, 1, 2, 3, ..., count (exclusive)`. +Seed is the raw shuffling seed, passed to permute-index (or optimized shuffling approach). + +## Condition + +The resulting list should match the expected output `shuffled` after shuffling the implied input, using the given `seed`. + +## Forks + +Forks-interpretation: `collective` + diff --git a/test_generators/shuffling/README.md b/test_generators/shuffling/README.md index 5cf9d50e0..a8f0cbdb4 100644 --- a/test_generators/shuffling/README.md +++ b/test_generators/shuffling/README.md @@ -2,12 +2,6 @@ Tests for the swap-or-not shuffling in ETH 2.0. -For implementers, possible test runners implementing testing can include: -1) just test permute-index, run it for each index `i` in `range(count)`, and check against expected `output[i]` (default spec implementation) -2) test un-permute-index (the reverse lookup. Implemented by running the shuffling rounds in reverse: from `round_count-1` to `0`) -3) test the optimized complete shuffle, where all indices are shuffled at once, test output in one go. -4) test complete shuffle in reverse (reverse rounds, same as 2) - Tips for initial shuffling write: - run with `round_count = 1` first, do the same with pyspec. 
- start with permute index From 88d076b615d5b45887faece7dbdfaa968b7e8f7a Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 22:48:59 +1000 Subject: [PATCH 255/481] initial work to fix outdated ssz tests --- specs/test_formats/ssz/uint.md | 26 ++++++++++++++++++++++++++ test_generators/ssz/main.py | 6 +++--- 2 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 specs/test_formats/ssz/uint.md diff --git a/specs/test_formats/ssz/uint.md b/specs/test_formats/ssz/uint.md new file mode 100644 index 000000000..c05a343af --- /dev/null +++ b/specs/test_formats/ssz/uint.md @@ -0,0 +1,26 @@ +# Test format: SSZ uints + +SSZ supports encoding of uints up to 32 bytes. These are considered to be basic types. + +## Test case format + +```yaml +TODO: old format +# type: "uintN" -- string, where N is one of [8, 16, 32, 64, 128, 256] +# valid: bool -- expected validity of the input data +# ssz: bytes -- string, input data, hex encoded, with prefix 0x +# tags: List[string] -- description of test case, in the form of a list of labels +``` + +## Condition + +- Encoding: After encoding the given input number, the +- Decoding: After decoding the given `output` bytes, it should match the `input` number + +## Forks + +Forks-interpretation: `collective` + +``` + +``` \ No newline at end of file diff --git a/test_generators/ssz/main.py b/test_generators/ssz/main.py index f6454c3c0..e53204df0 100644 --- a/test_generators/ssz/main.py +++ b/test_generators/ssz/main.py @@ -8,7 +8,7 @@ from uint_test_cases import ( from gen_base import gen_runner, gen_suite, gen_typing def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("uint_random", "core", gen_suite.render_suite( + return ("uint_random", "uint", gen_suite.render_suite( title="UInt Random", summary="Random integers chosen uniformly over the allowed value range", forks_timeline= "mainnet", @@ -18,7 +18,7 @@ def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: test_cases=generate_random_uint_test_cases())) def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("uint_wrong_length", "core", gen_suite.render_suite( + return ("uint_wrong_length", "uint", gen_suite.render_suite( title="UInt Wrong Length", summary="Serialized integers that are too short or too long", forks_timeline= "mainnet", @@ -28,7 +28,7 @@ def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: test_cases=generate_uint_wrong_length_test_cases())) def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - return ("uint_bounds", "core", gen_suite.render_suite( + return ("uint_bounds", "uint", gen_suite.render_suite( title="UInt Bounds", summary="Integers right at or beyond the bounds of the allowed value range", forks_timeline= "mainnet", From b6c45b9dc1326761b88aa05811416a0fc885530a Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 23:02:36 +1000 Subject: [PATCH 256/481] update ssz test format and fix uint sizes --- specs/test_formats/ssz/uint.md | 19 ++++++++----------- test_generators/ssz/main.py | 2 +- test_generators/ssz/uint_test_cases.py | 2 +- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/specs/test_formats/ssz/uint.md b/specs/test_formats/ssz/uint.md index c05a343af..f71ddecb8 100644 --- a/specs/test_formats/ssz/uint.md +++ b/specs/test_formats/ssz/uint.md @@ -5,22 +5,19 @@ SSZ supports encoding of uints up to 32 bytes. 
These are considered to be basic ## Test case format ```yaml -TODO: old format -# type: "uintN" -- string, where N is one of [8, 16, 32, 64, 128, 256] -# valid: bool -- expected validity of the input data -# ssz: bytes -- string, input data, hex encoded, with prefix 0x -# tags: List[string] -- description of test case, in the form of a list of labels +type: "uintN" -- string, where N is one of [8, 16, 32, 64, 128, 256] +valid: bool -- expected validity of the input data +value: string -- string, decimal encoding, to support up to 256 bit integers +ssz: bytes -- string, input data, hex encoded, with prefix 0x +tags: List[string] -- description of test case, in the form of a list of labels ``` ## Condition -- Encoding: After encoding the given input number, the -- Decoding: After decoding the given `output` bytes, it should match the `input` number +Two-way testing can be implemented in the test-runner: +- Encoding: After encoding the given input number `value`, the output should match `ssz` +- Decoding: After decoding the given `ssz` bytes, it should match the input number `value` ## Forks Forks-interpretation: `collective` - -``` - -``` \ No newline at end of file diff --git a/test_generators/ssz/main.py b/test_generators/ssz/main.py index e53204df0..0e5d9d7c8 100644 --- a/test_generators/ssz/main.py +++ b/test_generators/ssz/main.py @@ -39,4 +39,4 @@ def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: if __name__ == "__main__": - gen_runner.run_generator("ssz", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite]) \ No newline at end of file + gen_runner.run_generator("ssz", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite]) diff --git a/test_generators/ssz/uint_test_cases.py b/test_generators/ssz/uint_test_cases.py index d123564ca..9ede16848 100644 --- a/test_generators/ssz/uint_test_cases.py +++ b/test_generators/ssz/uint_test_cases.py @@ -15,7 +15,7 @@ from renderers import ( random.seed(0) -BIT_SIZES = [i for i in range(8, 512 + 1, 8)] +BIT_SIZES = [8, 16, 32, 64, 128, 256] RANDOM_TEST_CASES_PER_BIT_SIZE = 10 RANDOM_TEST_CASES_PER_LENGTH = 3 From a3b020384ffadcb677ba4374cbfa9d0ff2b73a9b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 12 Apr 2019 23:17:57 +1000 Subject: [PATCH 257/481] add tests for finality through the first few epochs --- specs/core/0_beacon-chain.md | 2 +- tests/phase0/helpers.py | 44 +++++++++++++++++++++++++---- tests/phase0/test_sanity.py | 55 ++++++++++++++++++++++++++++++++++++ 3 files changed, 94 insertions(+), 7 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a45e0565b..dfa26c86d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1007,7 +1007,7 @@ def get_beacon_proposer_index(state: BeaconState, the epoch in question, this can only be run for the current epoch. 
""" current_epoch = get_current_epoch(state) - assert slot_to_epoch(slot) == current_epoch + # assert slot_to_epoch(slot) == current_epoch first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0] i = 0 diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 020e51831..66ae17f2c 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -34,6 +34,7 @@ from build.phase0.spec import ( get_empty_block, get_epoch_start_slot, get_genesis_beacon_state, + get_previous_epoch, slot_to_epoch, verify_merkle_branch, hash, @@ -50,6 +51,19 @@ pubkeys = [bls.privtopub(privkey) for privkey in privkeys] pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} +def set_bitfield_bit(bitfield, i): + """ + Set the bit in ``bitfield`` at position ``i`` to ``1``. + """ + byte_index = i // 8 + bit_index = i % 8 + return ( + bitfield[:byte_index] + + bytes([bitfield[byte_index] | (1 << bit_index)]) + + bitfield[byte_index+1:] + ) + + def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): if not deposit_data_leaves: deposit_data_leaves = [] @@ -141,24 +155,31 @@ def build_deposit_data(state, pubkey, privkey, amount): def build_attestation_data(state, slot, shard): assert state.slot >= slot - block_root = build_empty_block_for_next_slot(state).previous_block_root + if slot == state.slot: + block_root = build_empty_block_for_next_slot(state).previous_block_root + else: + block_root = get_block_root(state, slot) - epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - if epoch_start_slot == slot: + current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) + if slot < current_epoch_start_slot: + epoch_boundary_root = get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) + elif slot == current_epoch_start_slot: epoch_boundary_root = block_root else: - epoch_boundary_root = get_block_root(state, epoch_start_slot) + epoch_boundary_root = get_block_root(state, current_epoch_start_slot) - if slot < epoch_start_slot: + if slot < current_epoch_start_slot: + justified_epoch = state.previous_justified_epoch justified_block_root = state.previous_justified_root else: + justified_epoch = state.current_justified_epoch justified_block_root = state.current_justified_root return AttestationData( slot=slot, shard=shard, beacon_block_root=block_root, - source_epoch=state.current_justified_epoch, + source_epoch=justified_epoch, source_root=justified_block_root, target_root=epoch_boundary_root, crosslink_data_root=spec.ZERO_HASH, @@ -317,6 +338,17 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) ) +def fill_aggregate_attestation(state, attestation): + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data) + for i in range(len(crosslink_committee)): + attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i) + + +def next_slot(state): + block = build_empty_block_for_next_slot(state) + state_transition(state, block) + + def next_epoch(state): block = build_empty_block_for_next_slot(state) block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 0930bad07..67a27f3f2 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -39,10 +39,12 @@ from build.phase0.utils.merkle_minimal import ( from tests.phase0.helpers import ( build_deposit_data, build_empty_block_for_next_slot, + 
fill_aggregate_attestation, force_registry_change_at_next_epoch, get_valid_attestation, get_valid_attester_slashing, get_valid_proposer_slashing, + next_slot, privkeys, pubkeys, ) @@ -52,6 +54,33 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.sanity +def check_finality(state, + prev_state, + current_justified_changed, + previous_justified_changed, + finalized_changed): + if current_justified_changed: + assert state.current_justified_epoch > prev_state.current_justified_epoch + assert state.current_justified_root != prev_state.current_justified_root + else: + assert state.current_justified_epoch == prev_state.current_justified_epoch + assert state.current_justified_root == prev_state.current_justified_root + + if previous_justified_changed: + assert state.previous_justified_epoch > prev_state.previous_justified_epoch + assert state.previous_justified_root != prev_state.previous_justified_root + else: + assert state.previous_justified_epoch == prev_state.previous_justified_epoch + assert state.previous_justified_root == prev_state.previous_justified_root + + if finalized_changed: + assert state.finalized_epoch > prev_state.finalized_epoch + assert state.finalized_root != prev_state.finalized_root + else: + assert state.finalized_epoch == prev_state.finalized_epoch + assert state.finalized_root == prev_state.finalized_root + + def test_slot_transition(state): test_state = deepcopy(state) cache_state(test_state) @@ -116,6 +145,32 @@ def test_empty_epoch_transition_not_finalizing(state): return state, [block], test_state +def test_full_attestations_finalizing(state): + test_state = deepcopy(state) + + for slot in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(test_state) + + for epoch in range(5): + for slot in range(spec.SLOTS_PER_EPOCH): + attestation = get_valid_attestation(test_state, test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY) + fill_aggregate_attestation(test_state, attestation) + block = build_empty_block_for_next_slot(test_state) + block.body.attestations.append(attestation) + state_transition(test_state, block) + + if epoch == 0: + check_finality(test_state, state, False, False, False) + elif epoch == 1: + check_finality(test_state, state, False, False, False) + elif epoch == 2: + check_finality(test_state, state, True, False, False) + elif epoch == 3: + check_finality(test_state, state, True, True, False) + elif epoch == 4: + check_finality(test_state, state, True, True, True) + + def test_proposer_slashing(state): test_state = deepcopy(state) proposer_slashing = get_valid_proposer_slashing(state) From 6b701a6c8b0dedb097163902c19d07d68f024cdd Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 12 Apr 2019 23:59:19 +1000 Subject: [PATCH 258/481] update tests format docs --- specs/test_formats/README.md | 26 +++++++++++++++++++------- specs/test_formats/ssz/README.md | 15 +++++++++++++++ 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 71e72f0cb..53973495d 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -17,8 +17,17 @@ This document defines the YAML format and structure used for ETH 2.0 testing. Ethereum 2.0 uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform. +### Test-case formats + The particular formats of specific types of tests (test suites) are defined in separate documents. 
+Test formats: +- [`bls`](./bls/README.md) +- [`operations`](./operations/README.md) +- [`shuffling`](./shuffling/README.md) +- [`ssz`](./ssz/README.md) +- More formats are planned, see tracking issues for CI/testing + ## Glossary - `generator`: a program that outputs one or more `suite` files. @@ -167,11 +176,14 @@ To prevent parsing of hundreds of different YAML files to test a specific test t or even more specific, just a handler, tests should be structured in the following nested form: ``` -. <--- root of eth2.0 tests repository -├── bls <--- collection of handler for a specific test-runner, example runner: "bls" -│   ├── signing <--- collection of test suites for a specific handler, example handler: "signing". If no multiple handlers, use a dummy folder (e.g. "main"), and specify that in the yaml. -│   │   ├── sign_msg.yml <--- an entry list of test suites -│   │   ... <--- more suite files (optional) -│   ... <--- more handlers -... <--- more test types +. <--- root of eth2.0 tests repository +├── bls <--- collection of handler for a specific test-runner, example runner: "bls" +│   ├── verify_msg <--- collection of test suites for a specific handler, example handler: "verify_msg". If no multiple handlers, use a dummy folder (e.g. "core"), and specify that in the yaml. +│   │   ├── verify_valid.yml . +│   │   ├── special_cases.yml . a list of test suites +│   │   ├── domains.yml . +│   │   ├── invalid.yml . +│   │   ... <--- more suite files (optional) +│   ... <--- more handlers +... <--- more test types ``` diff --git a/specs/test_formats/ssz/README.md b/specs/test_formats/ssz/README.md index e69de29bb..72ba7dac1 100644 --- a/specs/test_formats/ssz/README.md +++ b/specs/test_formats/ssz/README.md @@ -0,0 +1,15 @@ +# SSZ tests + +SSZ has changed throughout the development of ETH 2.0. + +## Contents + +A minimal but useful series of tests covering `uint` encoding and decoding is provided. +This is a direct port of the older SSZ `uint` tests (minus outdated test cases). + +[uint test format](./uint.md). + +Note: the current phase-0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc. +The exact uint lengths to support may be redefined in the future. + +Extension of the SSZ tests collection is planned, see CI/testing issues for progress tracking. 
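To make the `uint` format above concrete, one possible two-way check per test case could look like this (an illustrative sketch only; field names follow `uint.md` above, and SSZ basic uints are serialized little-endian):

```python
def check_uint_case(case: dict) -> None:
    # fields per uint.md: "type" (e.g. "uint64"), "valid", "value" (decimal string), "ssz" (0x-hex)
    byte_length = int(case["type"][len("uint"):]) // 8
    serialized = bytes.fromhex(case["ssz"][2:])
    if case["valid"]:
        value = int(case["value"])
        assert value.to_bytes(byte_length, "little") == serialized  # encoding
        assert int.from_bytes(serialized, "little") == value        # decoding
    else:
        # invalid cases (wrong length or out-of-bounds value) should be rejected;
        # how rejection surfaces (exception, error value) is implementation-specific
        pass
```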
From fcc4dc3710557da1837cb2c4e53b78fcc95857e1 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 13 Apr 2019 00:03:04 +1000 Subject: [PATCH 259/481] temporarily disable the yaml save and commit, exact workflow TBD in future PR --- .circleci/config.yml | 133 ++++++++++++++++++++++--------------------- 1 file changed, 68 insertions(+), 65 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c347a064f..d40fd467f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -19,68 +19,71 @@ jobs: name: Generate YAML tests command: make gen_yaml_tests - - store_artifacts: - path: test-reports - destination: test-reports - - - run: - name: Save YAML tests for deployment - command: | - mkdir /tmp/workspace - cp -r yaml_tests /tmp/workspace/ - git log -1 >> /tmp/workspace/latest_commit_message - - persist_to_workspace: - root: /tmp/workspace - paths: - - yaml_tests - - latest_commit_message - commit: - docker: - - image: circleci/python:3.6 - steps: - - attach_workspace: - at: /tmp/workspace - - add_ssh_keys: - fingerprints: - - "01:85:b6:36:96:a6:84:72:e4:9b:4e:38:ee:21:97:fa" - - run: - name: Checkout test repository - command: | - ssh-keyscan -H github.com >> ~/.ssh/known_hosts - git clone git@github.com:ethereum/eth2.0-tests.git - - run: - name: Commit and push generated YAML tests - command: | - cd eth2.0-tests - git config user.name 'eth2TestGenBot' - git config user.email '47188154+eth2TestGenBot@users.noreply.github.com' - for filename in /tmp/workspace/yaml_tests/*; do - rm -rf $(basename $filename) - cp -r $filename . - done - git add . - if git diff --cached --exit-code >& /dev/null; then - echo "No changes to commit" - else - echo -e "Update generated tests\n\nLatest commit message from eth2.0-specs:\n" > commit_message - cat /tmp/workspace/latest_commit_message >> commit_message - git commit -F commit_message - git push origin master - fi -workflows: - version: 2.1 - - build_and_commit: - jobs: - - build: - filters: - tags: - only: /.*/ - - commit: - requires: - - build - filters: - tags: - only: /.*/ - branches: - ignore: /.*/ \ No newline at end of file +# TODO in future PR (after #851): decide on CI triggering of yaml tests building, +# and destination of output (new yaml tests LFS-configured repository) +# +# - store_artifacts: +# path: test-reports +# destination: test-reports +# +# - run: +# name: Save YAML tests for deployment +# command: | +# mkdir /tmp/workspace +# cp -r yaml_tests /tmp/workspace/ +# git log -1 >> /tmp/workspace/latest_commit_message +# - persist_to_workspace: +# root: /tmp/workspace +# paths: +# - yaml_tests +# - latest_commit_message +# commit: +# docker: +# - image: circleci/python:3.6 +# steps: +# - attach_workspace: +# at: /tmp/workspace +# - add_ssh_keys: +# fingerprints: +# - "01:85:b6:36:96:a6:84:72:e4:9b:4e:38:ee:21:97:fa" +# - run: +# name: Checkout test repository +# command: | +# ssh-keyscan -H github.com >> ~/.ssh/known_hosts +# git clone git@github.com:ethereum/eth2.0-tests.git +# - run: +# name: Commit and push generated YAML tests +# command: | +# cd eth2.0-tests +# git config user.name 'eth2TestGenBot' +# git config user.email '47188154+eth2TestGenBot@users.noreply.github.com' +# for filename in /tmp/workspace/yaml_tests/*; do +# rm -rf $(basename $filename) +# cp -r $filename . +# done +# git add . 
+# if git diff --cached --exit-code >& /dev/null; then +# echo "No changes to commit" +# else +# echo -e "Update generated tests\n\nLatest commit message from eth2.0-specs:\n" > commit_message +# cat /tmp/workspace/latest_commit_message >> commit_message +# git commit -F commit_message +# git push origin master +# fi +#workflows: +# version: 2.1 +# +# build_and_commit: +# jobs: +# - build: +# filters: +# tags: +# only: /.*/ +# - commit: +# requires: +# - build +# filters: +# tags: +# only: /.*/ +# branches: +# ignore: /.*/ \ No newline at end of file From f4753d6157841eadff7ed65bc21a479ce889c546 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Fri, 12 Apr 2019 18:32:36 -0500 Subject: [PATCH 260/481] Update rpc-interface.md (#907) --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index fa49bcd75..85ebe0bf6 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -9,7 +9,7 @@ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", # Dependencies -This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification), and [Beacon Chain](../core/0_beacon-chain.md) specifications. +This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification.md), and [Beacon Chain](../core/0_beacon-chain.md) specifications. # Specification From 0b6d6f2c51eb3ff66373ec5b9d86db22711ee285 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Fri, 12 Apr 2019 18:33:34 -0500 Subject: [PATCH 261/481] Update 0_beacon-chain-validator.md (#909) --- specs/validator/0_beacon-chain-validator.md | 32 ++++++++++----------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 7b03a910a..1fbe08ef4 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 0 -- Honest Validator -__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md) that describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol. +__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) that describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol. ## Table of Contents @@ -66,7 +66,7 @@ A validator is an entity that participates in the consensus of the Ethereum 2.0 ## Prerequisites -All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) doc are requisite for this document and used throughout. 
Please see the Phase 0 doc before continuing and use as a reference throughout. ## Constants @@ -84,7 +84,7 @@ A validator must initialize many parameters locally before submitting a deposit #### BLS public key -Validator public keys are [G1 points](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#g1-points) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator. +Validator public keys are [G1 points](../bls_signature.md#g1-points) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator. #### BLS withdrawal key @@ -96,7 +96,7 @@ The validator constructs their `withdrawal_credentials` via the following: ### Submit deposit -In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) located at `DEPOSIT_CONTRACT_ADDRESS`. +In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](../core/0_beacon-chain.md#ethereum-10-deposit-contract) located at `DEPOSIT_CONTRACT_ADDRESS`. To submit a deposit: @@ -114,13 +114,13 @@ Deposits cannot be processed into the beacon chain until the eth1.0 block in whi ### Validator index -Once a validator has been processed and added to the beacon state's `validator_registry`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally. +Once a validator has been processed and added to the beacon state's `validator_registry`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](../core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally. ### Activation In normal operation, the validator is quickly activated at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes). -The function [`is_active_validator`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. 
Usage is as follows: +The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. Usage is as follows: ```python shuffling_epoch = state.current_shuffling_epoch @@ -138,7 +138,7 @@ A validator has two primary responsibilities to the beacon chain -- [proposing b ### Block proposal -A validator is expected to propose a [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function). +A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function). There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (eg. at 312500 validators = 10 million ETH, that's once per ~3 weeks). @@ -212,25 +212,25 @@ block_signature = bls_sign( ##### Proposer slashings -Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included. +Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](../core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](../core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included. ##### Attester slashings -Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included. 
+Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](../core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](../core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included. ##### Attestations -Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. +Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](../core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. ##### Deposits -If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits). +If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](../core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. 
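As a rough sketch of the idea (illustrative only; `hash` is the spec's hash function, `leaves` are the 32-byte tree leaves, and empty positions are padded with zero-bytes32 as in the deposit tree — see the linked `minimal_merkle.py` for an optimized implementation):

```python
def compute_merkle_proof(leaves: List[Bytes32], index: int, depth: int) -> List[Bytes32]:
    # pad to 2**depth leaves, then collect the sibling node at each level, bottom-up
    layer = list(leaves) + [b'\x00' * 32] * (2**depth - len(leaves))
    proof = []
    for _ in range(depth):
        proof.append(layer[index ^ 1])  # sibling of the current node
        layer = [hash(layer[i] + layer[i + 1]) for i in range(0, len(layer), 2)]
        index //= 2
    return proof  # checkable against the root with the spec's verify_merkle_branch
```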
##### Voluntary exits -Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntary-exits). +Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](../core/0_beacon-chain.md#voluntary-exits). ### Attestations @@ -240,7 +240,7 @@ A validator should create and broadcast the attestation halfway through the `slo #### Attestation data -First the validator should construct `attestation_data`, an [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot. +First the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot. * Let `head_block` be the result of running the fork choice during the assigned slot. * Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot. @@ -285,7 +285,7 @@ Set `attestation_data.source_root = head_state.current_justified_root`. #### Construct attestation -Next the validator creates `attestation`, an [`Attestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestation) object. +Next the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object. ##### Data @@ -399,7 +399,7 @@ _Note_: Signed data must be within a sequential `Fork` context to conflict. Mess ### Proposer slashing -To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposalsigneddata) where conflicting is defined as two distinct blocks within the same epoch. +To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same epoch. _In phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings._ @@ -411,7 +411,7 @@ If the software crashes at some point within this routine, then when the validat ### Attester slashing -To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_surround_vote). +To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](../core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](../core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](../core/0_beacon-chain.md#is_surround_vote). 
Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order: 1. Save a record to hard disk that an attestation has been signed for source -- `attestation_data.source_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`. From ee1578d22a4dece09dc399716d8f3362ee8f708a Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Fri, 12 Apr 2019 18:33:53 -0500 Subject: [PATCH 262/481] Update 0_beacon-chain.md (#908) --- specs/core/0_beacon-chain.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d5e978afb..b7a63b4f6 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -284,7 +284,7 @@ These configurations are updated for releases, but may be out of sync during `de ## Data structures -The following data structures are defined as [SimpleSerialize (SSZ)](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md) objects. +The following data structures are defined as [SimpleSerialize (SSZ)](../simple-serialize.md) objects. The types are defined topologically to aid in facilitating an executable version of the spec. @@ -657,11 +657,11 @@ Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethere ### `hash_tree_root` -`def hash_tree_root(object: SSZSerializable) -> Bytes32` is a function for hashing objects into a single root utilizing a hash tree structure. `hash_tree_root` is defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#tree-hash). +`def hash_tree_root(object: SSZSerializable) -> Bytes32` is a function for hashing objects into a single root utilizing a hash tree structure. `hash_tree_root` is defined in the [SimpleSerialize spec](../simple-serialize.md#merkleization). ### `signing_root` -`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) to compute signing messages. +`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages. ### `get_temporary_block_header` @@ -1270,15 +1270,15 @@ def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: ### `bls_verify` -`bls_verify` is a function for verifying a BLS signature, defined in the [BLS Signature spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#bls_verify). +`bls_verify` is a function for verifying a BLS signature, defined in the [BLS Signature spec](../bls_signature.md#bls_verify). ### `bls_verify_multiple` -`bls_verify_multiple` is a function for verifying a BLS signature constructed from multiple messages, defined in the [BLS Signature spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#bls_verify_multiple). +`bls_verify_multiple` is a function for verifying a BLS signature constructed from multiple messages, defined in the [BLS Signature spec](../bls_signature.md#bls_verify_multiple). ### `bls_aggregate_pubkeys` -`bls_aggregate_pubkeys` is a function for aggregating multiple BLS public keys into a single aggregate key, defined in the [BLS Signature spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#bls_aggregate_pubkeys). 
+`bls_aggregate_pubkeys` is a function for aggregating multiple BLS public keys into a single aggregate key, defined in the [BLS Signature spec](../bls_signature.md#bls_aggregate_pubkeys). ### Routines for updating validator status From b34c41c5257731653d38fbb73373ec27996e138d Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Sat, 13 Apr 2019 09:56:19 +1000 Subject: [PATCH 263/481] Update 0_beacon-chain.md (#911) Fix prose around state transition functionality --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b7a63b4f6..a0001d03b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1603,9 +1603,9 @@ We now define the state transition function. At a high level, the state transiti 4. The per-block transitions, which happens at every block. Transition section notes: -* The state caching caches the state root of the previous slot. +* The state caching caches the state root of the previous slot and updates block and state roots records. * The per-epoch transitions focus on the [validator](#dfn-validator) registry, including adjusting balances and activating and exiting [validators](#dfn-validator), as well as processing crosslinks and managing block justification/finalization. -* The per-slot transitions focus on the slot counter and block roots records updates. +* The per-slot transitions focus on the slot counter. * The per-block transitions generally focus on verifying aggregate signatures and saving temporary records relating to the per-block activity in the `BeaconState`. Beacon blocks that trigger unhandled Python exceptions (e.g. out-of-range list accesses) and failed `assert`s during the state transition are considered invalid. From 0a8b5275ab87c322206444e4ec19e77c20936ee6 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Fri, 12 Apr 2019 18:56:37 -0500 Subject: [PATCH 264/481] Update rpc-interface.md (#910) --- specs/networking/rpc-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index 85ebe0bf6..5d408b5a0 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -26,7 +26,7 @@ Message body schemas are notated like this: Embedded types are serialized as SSZ Containers unless otherwise noted. -All referenced data structures can be found in the [0-beacon-chain](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#data-structures) specification. +All referenced data structures can be found in the [0-beacon-chain](../core/0_beacon-chain.md#data-structures) specification. 
## `libp2p` Protocol Names From 1932a4fbf4ef6cb0309bdff9722316710cc1ccc8 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Fri, 12 Apr 2019 19:46:22 -0500 Subject: [PATCH 265/481] Update 0_beacon-chain-validator.md --- specs/validator/0_beacon-chain-validator.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 1fbe08ef4..5f13fc2c3 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -100,11 +100,11 @@ In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW To submit a deposit: -* Pack the validator's [initialization parameters](#initialization) into `deposit_input`, a [`DepositInput`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositinput) SSZ object. -* Let `proof_of_possession` be the result of `bls_sign` of the `signing_root(deposit_input)` with `domain=DOMAIN_DEPOSIT`. -* Set `deposit_input.proof_of_possession = proof_of_possession`. +* Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object. +* Let `proof_of_possession` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`. +* Set `deposit_data.proof_of_possession = proof_of_possession`. * Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`. -* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit` along with `serialize(deposit_input)` as the singular `bytes` input along with a deposit `amount` in Gwei. +* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit` along with `serialize(deposit.data)` as the singular `bytes` input along with a deposit `amount` in Gwei. _Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`. 
From 710bacad757ae91fd94577b513b335e748b33e03 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Sat, 13 Apr 2019 11:56:55 +1000 Subject: [PATCH 266/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a0001d03b..f6427f1d4 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -234,7 +234,7 @@ These configurations are updated for releases, but may be out of sync during `de | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | -| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | +| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours | * `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` From 75df6106aae2ed3ce1cb51894f5e78a6c589c664 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 12 Apr 2019 23:23:04 -0600 Subject: [PATCH 267/481] clean up some deposit notes in validator doc --- specs/validator/0_beacon-chain-validator.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 5f13fc2c3..60d283664 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -104,7 +104,8 @@ To submit a deposit: * Let `proof_of_possession` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`. * Set `deposit_data.proof_of_possession = proof_of_possession`. * Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`. -* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit` along with `serialize(deposit.data)` as the singular `bytes` input along with a deposit `amount` in Gwei. +* Set `deposit_data.amount = amount`. +* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit(deposit_input: bytes[512])` along with `serialize(deposit_data)` as the singular `bytes` input along with a deposit of `amount` Gwei. _Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`. 
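To illustrate the deposit steps above, a rough sketch of assembling the contract call input (illustrative only; `DepositData`, `bls_sign`, `signing_root`, `serialize`, and `DOMAIN_DEPOSIT` are the spec/SSZ definitions referenced in the text, and the zeroed placeholder signature is an assumption of this sketch):

```python
def build_deposit_input(pubkey: bytes, withdrawal_credentials: bytes, amount: int, privkey: int) -> bytes:
    deposit_data = DepositData(
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,
        amount=amount,                     # Gwei, MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT
        proof_of_possession=b'\x00' * 96,  # placeholder; the signing root excludes this field
    )
    deposit_data.proof_of_possession = bls_sign(
        message_hash=signing_root(deposit_data),
        privkey=privkey,
        domain=DOMAIN_DEPOSIT,
    )
    # the serialized bytes are the `deposit_input: bytes[512]` argument, sent along with `amount` Gwei
    return serialize(deposit_data)
```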
From a6b3b11356e13d22ad49e84d9985f971810a447a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 13 Apr 2019 17:49:25 +1000 Subject: [PATCH 268/481] ensure no reward for crosslinks taht can't form a chain --- specs/core/0_beacon-chain.md | 7 ++++++ .../test_process_crosslinks.py | 23 +++++++++++-------- tests/phase0/helpers.py | 7 +++++- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6a81dadcf..70b5bac9b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1917,6 +1917,13 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: for slot in range(previous_epoch_start_slot, current_epoch_start_slot): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_root, previous_crosslink_root, participants = get_winning_root_and_participants(state, slot, shard) + + # do not count as success if winning_root did not or cannot form a chain + attempted_crosslink = Crosslink(epoch=slot_to_epoch(slot), crosslink_data_root=winning_root, previous_crosslink_root=previous_crosslink_root) + current_crosslink_root = hash_tree_root(state.current_crosslinks[shard]) + if not current_crosslink_root in {previous_crosslink_root, hash_tree_root(attempted_crosslink) }: + participants = [] + participating_balance = get_total_balance(state, participants) total_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: diff --git a/tests/phase0/epoch_processing/test_process_crosslinks.py b/tests/phase0/epoch_processing/test_process_crosslinks.py index f2be142c6..06dc07d85 100644 --- a/tests/phase0/epoch_processing/test_process_crosslinks.py +++ b/tests/phase0/epoch_processing/test_process_crosslinks.py @@ -7,12 +7,9 @@ from build.phase0.state_transition import ( state_transition, ) from build.phase0.spec import ( - ZERO_HASH, cache_state, - get_crosslink_committee_for_attestation, - get_current_epoch, + get_crosslink_deltas, process_crosslinks, - slot_to_epoch, ) from tests.phase0.helpers import ( add_attestation_to_state, @@ -20,6 +17,7 @@ from tests.phase0.helpers import ( fill_aggregate_attestation, get_valid_attestation, next_epoch, + next_slot, set_bitfield_bit, ) @@ -102,8 +100,11 @@ def test_double_late_crosslink(state): next_epoch(state) add_attestation_to_state(state, attestation_1, state.slot + 1) - state.slot = attestation_1.data.slot + spec.SLOTS_PER_EPOCH - attestation_2 = get_valid_attestation(state) + for slot in range(spec.SLOTS_PER_EPOCH): + attestation_2 = get_valid_attestation(state) + if attestation_2.data.shard == attestation_1.data.shard: + break + next_slot(state) fill_aggregate_attestation(state, attestation_2) # add attestation_2 in the next epoch after attestation_1 has @@ -115,13 +116,15 @@ def test_double_late_crosslink(state): assert len(state.current_epoch_attestations) == 0 pre_state, post_state = run_process_crosslinks(state) + crosslink_deltas = get_crosslink_deltas(state) - shard_1 = attestation_1.data.shard - shard_2 = attestation_2.data.shard - assert shard_1 == shard_2 - shard = shard_1 + shard = attestation_2.data.shard + slot = attestation_2.data.slot # ensure that the current crosslinks were not updated by the second attestation assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard] + # ensure no reward, only penalties for the failed crosslink + assert crosslink_deltas[0][slot % spec.SLOTS_PER_EPOCH] == 0 + assert crosslink_deltas[1][slot % 
spec.SLOTS_PER_EPOCH] > 0 return pre_state, post_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index cd731e49c..219e77c47 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -274,7 +274,7 @@ def get_valid_attester_slashing(state): def get_valid_attestation(state, slot=None): if slot is None: slot = state.slot - shard = state.latest_start_shard + slot % spec.SLOTS_PER_EPOCH + shard = (state.latest_start_shard + slot) % spec.SLOTS_PER_EPOCH attestation_data = build_attestation_data(state, slot, shard) crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) @@ -341,6 +341,11 @@ def add_attestation_to_state(state, attestation, slot): state_transition(state, block) +def next_slot(state): + block = build_empty_block_for_next_slot(state) + state_transition(state, block) + + def next_epoch(state): block = build_empty_block_for_next_slot(state) block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) From 9489ae5dcdaf68fba9214205f89fcee1d16daea9 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sat, 13 Apr 2019 18:02:12 +1000 Subject: [PATCH 269/481] upate validator guide to new crosslink format --- specs/validator/0_beacon-chain-validator.md | 40 ++++++++++----------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 60d283664..77d9f0ce6 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -38,13 +38,13 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers - [Attestations](#attestations-1) - [Attestation data](#attestation-data) - [Slot](#slot-1) - - [Shard](#shard) - [Beacon block root](#beacon-block-root) - - [Target root](#target-root) - - [Crosslink data root](#crosslink-data-root) - - [Latest crosslink](#latest-crosslink) - [Source epoch](#source-epoch) - [Source root](#source-root) + - [Target root](#target-root) + - [Shard](#shard) + - [Previous crosslink root](#previous-crosslink-root) + - [Crosslink data root](#crosslink-data-root) - [Construct attestation](#construct-attestation) - [Data](#data) - [Aggregation bitfield](#aggregation-bitfield) @@ -250,14 +250,18 @@ First the validator should construct `attestation_data`, an [`AttestationData`]( Set `attestation_data.slot = head_state.slot`. -##### Shard - -Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`. - ##### Beacon block root Set `attestation_data.beacon_block_root = signing_root(head_block)`. +##### Source epoch + +Set `attestation_data.source_epoch = head_state.justified_epoch`. + +##### Source root + +Set `attestation_data.source_root = head_state.current_justified_root`. + ##### Target root Set `attestation_data.target_root = signing_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. @@ -266,24 +270,20 @@ _Note:_ This can be looked up in the state using: * Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. * Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`. +##### Shard + +Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`. 
+ +##### Previous crosslink root + +Set `attestation_data.previous_crosslink_root = hash_tree_root(head_state.current_crosslinks[shard])`. + ##### Crosslink data root Set `attestation_data.crosslink_data_root = ZERO_HASH`. _Note:_ This is a stub for phase 0. -##### Latest crosslink - -Set `attestation_data.previous_crosslink = head_state.latest_crosslinks[shard]`. - -##### Source epoch - -Set `attestation_data.source_epoch = head_state.justified_epoch`. - -##### Source root - -Set `attestation_data.source_root = head_state.current_justified_root`. - #### Construct attestation Next the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object. From 8807781a8dd22c73865bd9d6deb9c368bfca3484 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 13 Apr 2019 18:16:44 +1000 Subject: [PATCH 270/481] formatting --- specs/light_client/merkle_proofs.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index a3c8fa154..47195b2ca 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -20,10 +20,10 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function: ```python -def merkle_tree(leaves): +def merkle_tree(leaves: List[Bytes32]) -> List[Bytes32]: o = [0] * len(leaves) + leaves - for i in range(len(leaves)-1, 0, -1): - o[i] = hash(o[i*2] + o[i*2+1]) + for i in range(len(leaves) - 1, 0, -1): + o[i] = hash(o[i * 2] + o[i * 2 + 1]) return o ``` @@ -102,8 +102,8 @@ x x . . . . x * Here is code for creating and verifying a multiproof. First a helper: ```python -def log2(x): - return 0 if x == 1 else 1 + log2(x//2) +def log2(x: int) -> int: + return 0 if x == 1 else 1 + log2(x // 2) ``` First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: @@ -111,7 +111,7 @@ First, a method for computing the generalized indices of the auxiliary tree node ```python def get_proof_indices(tree_indices: List[int]) -> List[int]: # Get all indices touched by the proof - maximal_indices = set({}) + maximal_indices = set() for i in tree_indices: x = i while x > 1: @@ -119,7 +119,7 @@ def get_proof_indices(tree_indices: List[int]) -> List[int]: x //= 2 maximal_indices = tree_indices + sorted(list(maximal_indices))[::-1] # Get indices that cannot be recalculated from earlier indices - redundant_indices = set({}) + redundant_indices = set() proof = [] for index in maximal_indices: if index not in redundant_indices: @@ -130,19 +130,19 @@ def get_proof_indices(tree_indices: List[int]) -> List[int]: break index //= 2 return [i for i in proof if i not in tree_indices] -```` +``` Generating a proof is simply a matter of taking the node of the SSZ hash tree with the union of the given generalized indices for each index given by `get_proof_indices`, and outputting the list of nodes in the same order. 
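For concreteness, here is an illustrative sketch of proof generation over the 8-leaf example above. It assumes the `merkle_tree` and `get_proof_indices` helpers defined in this document and the spec `hash` function; the leaf values are arbitrary.

```python
# Illustrative only: multiproof for generalized indices 8, 9 and 14
# (leaves 0, 1 and 6) of a flat 8-leaf tree.
leaves = [bytes([i]) * 32 for i in range(8)]           # arbitrary example leaves
tree = merkle_tree(leaves)                             # tree[1] is the root
indices = [8, 9, 14]
leaf_values = [tree[i] for i in indices]
proof = [tree[i] for i in get_proof_indices(indices)]  # the 3 auxiliary nodes, in order
```

The verification function below should then accept `(tree[1], indices, leaf_values, proof)`.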
Here is the verification function: ```python -def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[bytes]): +def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[bytes]) -> bool: tree = {} for index, leaf in zip(indices, leaves): tree[index] = leaf - for index, proofitem in zip(get_proof_indices(indices), proof): - tree[index] = proofitem + for index, proof_item in zip(get_proof_indices(indices), proof): + tree[index] = proof_item index_queue = sorted(tree.keys())[:-1] i = 0 while i < len(index_queue): From 449e8a44a4e9c3040334cac58b204d9ebede0951 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 13 Apr 2019 18:17:09 +1000 Subject: [PATCH 271/481] Remove unused `log2` --- specs/light_client/merkle_proofs.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 47195b2ca..371f0ffde 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -99,14 +99,7 @@ x x . . . . x * . are unused nodes, * are used nodes, x are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, only one node more than would be required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`. -Here is code for creating and verifying a multiproof. First a helper: - -```python -def log2(x: int) -> int: - return 0 if x == 1 else 1 + log2(x // 2) -``` - -First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: +Here is code for creating and verifying a multiproof. First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require: ```python def get_proof_indices(tree_indices: List[int]) -> List[int]: From 4d2bdf8628eb430c301043e9e4d91b8f883b0ca4 Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 13 Apr 2019 23:44:14 +1000 Subject: [PATCH 272/481] Cleanup spec --- specs/simple-serialize.md | 88 +++++++++------------------------------ 1 file changed, 19 insertions(+), 69 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 6bd9e3dd7..ff2e207f0 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -12,7 +12,7 @@ This is a **work in progress** describing typing, serialization and Merkleizatio - [Serialization](#serialization) - [`"uintN"`](#uintn) - [`"bool"`](#bool) - - [Vectors, containers, lists](#composite-types-vectors-containers-and-lists) + - [Vectors, containers, lists](#vectors-containers-lists) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Self-signed containers](#self-signed-containers) @@ -22,8 +22,9 @@ This is a **work in progress** describing typing, serialization and Merkleizatio | Name | Value | Description | |-|-|-| -| `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. +| `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. | | `BYTES_PER_LENGTH_PREFIX` | `4` | Number of bytes per serialized length prefix. | +| `BITS_PER_BYTE` | `8` | Number of bits per byte. 
| ## Typing ### Basic types @@ -56,97 +57,46 @@ We recursively define the `serialize` function which consumes an object `value` > *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, `is_fixed_size`, `is_variable_size` etc.) objects implicitly carry their type. -### Basic Types - -For basic types the `serialize` function is defined as follows. - -#### `"uintN"` - -A byte string of width `N // 8` containing the little-endian encode integer. +### `"uintN"` ```python assert N in [8, 16, 32, 64, 128, 256] return value.to_bytes(N // 8, "little") ``` -#### `"bool"` - -* The byte `\x00` **if** the value is `False` -* The byte `\x01` **if** the value is `True` +### `"bool"` ```python assert value in (True, False) return b"\x01" if value is True else b"\x00" ``` -### Composite Types (Vectors, Containers and Lists) - -The serialized representation of composite types is comprised of two binary sections. - -* The first section is the concatenation of a mixture of either the serialized representation for *fixed size* elements **or** a serialized offset value for *variable size* elements. - - All *fixed size* elements are represented in this section as their serialized representation. - - All *variable size* elements are represented in this section with a `"uint32"` serialized offset where the serialized representation of the element is located in the second section. - - offsets are relative to the beginning of the beginning of the entire serialized representation (the start of the first section) -* The second section is the concatenation of the serialized representations of **only** the *variable size* types. - - This section is empty in the case of a purely *fixed size* type. - - -Offset values are subject to the following validity rules: - -- For Vector and Container types: - - The first offset **must** be equal to the length of the first section. -- For all types: - - Offsets **MAY NOT** be less than any previous offset. - - Offsets **MUST** be less than `2**32` - - -#### `"vector"`, `"container"` and `"list"` - -Below is an illustrative implementation of the `serialize` function for `"Vector"`, -`"Container"` and `"List"` types. 
+### Vectors, containers, lists ```python -# The second section is just the concatenation of the serialized *variable size* elements -section_2_parts = [ - serialize(element) if is_variable_size(element) - else '' - for element in value -] -section_2_lengths = [len(part) for part in section_2_parts] -section_2 = ''.join(section_2_parts) +# Reccursively serialize fixed-size elements +fixed_parts = [serialize(element) if is_fixed_size(element) else None for element in value] +fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_PREFIX for part in fixed_parts] -# Compute the length of the first section (can also be extracted from the type directly) -section_1_length = sum( - len(serialize(element)) if is_fixed_size(element) - else 4 - for element in value -) +# Reccursively serialize variable-size elements +variable_parts = [serialize(element) if is_variable_size(element) else None for element in value] +variable_lengths = [len(part) if part != None else 0 for part in variable_parts] -# Compute the offset values for each part of the second section -section_1_offsets = [ - section_1_length + sum(section_2_lengths[:element_index]) if is_variable_size(element) - else None - for element_index, element in enumerate(value) -] -assert all(offset is None or offset < 2**32 for offset in section_1_offsets) +# Compute offsets of variable-size elements +assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_PREFIX * BITS_PER_BYTE) +offsets = [sum(fixed_lengths) + sum(variable_lengths[:i]) for i in range(len(value))] -# The first section is the concatenation of the serialized fixed size elements and offsets -section_1_parts = [ - serialize(element) if is_fixed_size(element) - else serialize(section_1_offsets[element_index]) - for element_index, element in enumerate(value) -] -section_1 = ''.join(section_1_parts) +# Interleave offsets in fixed parts +fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] -return ''.join([section_1, section_2]) +# Return the of fixed parts (with interleaved offsets) followed by variable parts +return "".join(fixed_parts + variable_parts) ``` - ## Deserialization Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Efficient algorithms for computing this object can be found in [the implementations](#implementations). - ## Merkleization We first define helper functions: From aaa5a1676572819befe3ca937449e261474e51bb Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 13 Apr 2019 23:45:18 +1000 Subject: [PATCH 273/481] Update simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index ff2e207f0..150cf2d54 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -55,7 +55,7 @@ For convenience we alias: We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`. -> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, `is_fixed_size`, `is_variable_size` etc.) objects implicitly carry their type. +> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, `is_fixed_size`, `is_variable_size`, etc.) objects implicitly carry their type. 
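As a small worked example of the offset scheme above (illustrative, non-normative): consider a hypothetical container with fields `x: "uint64"`, `y: ["uint8"]`, `z: "uint64"` and value `x=1, y=[5, 6], z=2`. The fixed-size fields are serialized in place, the variable-size field `y` is replaced by a 4-byte offset into the variable section, and `y`'s bytes are appended at the end.

```python
# Expected layout: 8 bytes (x) + 4 bytes (offset of y) + 8 bytes (z) + 2 bytes (y)
fixed_section = (
    (1).to_bytes(8, "little")      # serialize(x)
    + (20).to_bytes(4, "little")   # offset of y's data = 8 + 4 + 8 = 20
    + (2).to_bytes(8, "little")    # serialize(z)
)
variable_section = bytes([5, 6])   # serialize(y): concatenation of its "uint8" elements
serialized = fixed_section + variable_section
assert len(serialized) == 22 and serialized[12:20] == (2).to_bytes(8, "little")
```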
### `"uintN"` From 0695d0ad1c4725733ae0916a56340eb5ccf7c722 Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 13 Apr 2019 23:48:47 +1000 Subject: [PATCH 274/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 150cf2d54..bc1fa6ccb 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -86,10 +86,10 @@ variable_lengths = [len(part) if part != None else 0 for part in variable_parts] assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_PREFIX * BITS_PER_BYTE) offsets = [sum(fixed_lengths) + sum(variable_lengths[:i]) for i in range(len(value))] -# Interleave offsets in fixed parts +# Interleave offsets with fixed parts fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] -# Return the of fixed parts (with interleaved offsets) followed by variable parts +# Return the fixed parts (with offsets interleaved) followed by variable parts return "".join(fixed_parts + variable_parts) ``` From 35a6311208a61bf2b09850bb7731aa0c968066e4 Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 13 Apr 2019 23:50:23 +1000 Subject: [PATCH 275/481] Update simple-serialize.md --- specs/simple-serialize.md | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index bc1fa6ccb..5aba9aff5 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -74,19 +74,17 @@ return b"\x01" if value is True else b"\x00" ### Vectors, containers, lists ```python -# Reccursively serialize fixed-size elements +# Reccursively serialize fixed_parts = [serialize(element) if is_fixed_size(element) else None for element in value] -fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_PREFIX for part in fixed_parts] - -# Reccursively serialize variable-size elements variable_parts = [serialize(element) if is_variable_size(element) else None for element in value] + +# Compute and check lengths +fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_PREFIX for part in fixed_parts] variable_lengths = [len(part) if part != None else 0 for part in variable_parts] - -# Compute offsets of variable-size elements assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_PREFIX * BITS_PER_BYTE) -offsets = [sum(fixed_lengths) + sum(variable_lengths[:i]) for i in range(len(value))] -# Interleave offsets with fixed parts +# Compute offsets of variable-size elements, and interleave with fixed parts +offsets = [sum(fixed_lengths) + sum(variable_lengths[:i]) for i in range(len(value))] fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] # Return the fixed parts (with offsets interleaved) followed by variable parts From 80bd4a381b02dd7b226cab59742402990c7a7479 Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 13 Apr 2019 23:55:08 +1000 Subject: [PATCH 276/481] Update simple-serialize.md --- specs/simple-serialize.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 5aba9aff5..417d70b95 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -23,7 +23,7 @@ This is a **work in progress** describing typing, serialization and Merkleizatio | Name | Value | Description | |-|-|-| | `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. 
| -| `BYTES_PER_LENGTH_PREFIX` | `4` | Number of bytes per serialized length prefix. | +| `BYTES_PER_LENGTH_OFFSET` | `4` | Number of bytes per serialized length prefix. | | `BITS_PER_BYTE` | `8` | Number of bits per byte. | ## Typing @@ -79,9 +79,9 @@ fixed_parts = [serialize(element) if is_fixed_size(element) else None for elemen variable_parts = [serialize(element) if is_variable_size(element) else None for element in value] # Compute and check lengths -fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_PREFIX for part in fixed_parts] +fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part in fixed_parts] variable_lengths = [len(part) if part != None else 0 for part in variable_parts] -assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_PREFIX * BITS_PER_BYTE) +assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) # Compute offsets of variable-size elements, and interleave with fixed parts offsets = [sum(fixed_lengths) + sum(variable_lengths[:i]) for i in range(len(value))] From 10f3db977dc4f5e9e2ae7a1eb92034b1e4547f3e Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 13 Apr 2019 23:56:06 +1000 Subject: [PATCH 277/481] Update simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 417d70b95..a8f0d9add 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -23,7 +23,7 @@ This is a **work in progress** describing typing, serialization and Merkleizatio | Name | Value | Description | |-|-|-| | `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. | -| `BYTES_PER_LENGTH_OFFSET` | `4` | Number of bytes per serialized length prefix. | +| `BYTES_PER_LENGTH_OFFSET` | `4` | Number of bytes per serialized length offset. | | `BITS_PER_BYTE` | `8` | Number of bits per byte. 
| ## Typing From 27cf02a9b0a1914a92a8b5aac47c44ebe1699996 Mon Sep 17 00:00:00 2001 From: Justin Date: Sat, 13 Apr 2019 23:59:03 +1000 Subject: [PATCH 278/481] Update simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index a8f0d9add..f57945749 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -84,7 +84,7 @@ variable_lengths = [len(part) if part != None else 0 for part in variable_parts] assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) # Compute offsets of variable-size elements, and interleave with fixed parts -offsets = [sum(fixed_lengths) + sum(variable_lengths[:i]) for i in range(len(value))] +offsets = [sum(fixed_lengths + variable_lengths[:i]) for i in range(len(value))] fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] # Return the fixed parts (with offsets interleaved) followed by variable parts From a90bcc0cd4fb186beb0ef166ce7de87f526dad6a Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:00:46 +1000 Subject: [PATCH 279/481] Update simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index f57945749..a7d80e264 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -87,7 +87,7 @@ assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BIT offsets = [sum(fixed_lengths + variable_lengths[:i]) for i in range(len(value))] fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] -# Return the fixed parts (with offsets interleaved) followed by variable parts +# Return the fixed parts (with offsets interleaved) followed by the variable parts return "".join(fixed_parts + variable_parts) ``` From 23c09541e2a44fe4a56641ef354d24eb24c3e461 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:05:43 +1000 Subject: [PATCH 280/481] Update simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index a7d80e264..41999d1dc 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -83,7 +83,7 @@ fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part variable_lengths = [len(part) if part != None else 0 for part in variable_parts] assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) -# Compute offsets of variable-size elements, and interleave with fixed parts +# Compute offsets of variable-size parts and interleave offsets with fixed parts offsets = [sum(fixed_lengths + variable_lengths[:i]) for i in range(len(value))] fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] From f6ed1df62bd09758aebbaaeadf51041df1d62936 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:10:02 +1000 Subject: [PATCH 281/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 41999d1dc..ceb3652f3 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -76,11 +76,11 @@ return b"\x01" if value is True else b"\x00" ```python # Reccursively serialize fixed_parts = [serialize(element) if 
is_fixed_size(element) else None for element in value] -variable_parts = [serialize(element) if is_variable_size(element) else None for element in value] +variable_parts = [serialize(element) if is_variable_size(element) else "" for element in value] # Compute and check lengths fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part in fixed_parts] -variable_lengths = [len(part) if part != None else 0 for part in variable_parts] +variable_lengths = [len(part) for part in variable_parts] assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) # Compute offsets of variable-size parts and interleave offsets with fixed parts From 9adbaba96e01c756b474f48b13630ec0c339f396 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:14:30 +1000 Subject: [PATCH 282/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index ceb3652f3..6626b9cf4 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -83,11 +83,11 @@ fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part variable_lengths = [len(part) for part in variable_parts] assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) -# Compute offsets of variable-size parts and interleave offsets with fixed parts +# Compute offsets of variable-size parts and interleave offsets with fixed-size parts offsets = [sum(fixed_lengths + variable_lengths[:i]) for i in range(len(value))] fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] -# Return the fixed parts (with offsets interleaved) followed by the variable parts +# Return the fixed-size parts (with offsets interleaved) followed by the variable-size parts return "".join(fixed_parts + variable_parts) ``` From 97ca6721056723193d8b8fc69aa8dcadb6c38098 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:18:44 +1000 Subject: [PATCH 283/481] Update simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 6626b9cf4..b956df4d1 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -85,7 +85,7 @@ assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BIT # Compute offsets of variable-size parts and interleave offsets with fixed-size parts offsets = [sum(fixed_lengths + variable_lengths[:i]) for i in range(len(value))] -fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] +fixed_parts = [part if part != None else serialize(offsets[i]) for i, part in enumerate(fixed_parts)] # Return the fixed-size parts (with offsets interleaved) followed by the variable-size parts return "".join(fixed_parts + variable_parts) From 09d927405c9020da2ed466d8e2782929589b9ab1 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:22:41 +1000 Subject: [PATCH 284/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index b956df4d1..1af409c58 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -83,11 +83,11 @@ fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part variable_lengths = [len(part) for part in variable_parts] assert 
sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) -# Compute offsets of variable-size parts and interleave offsets with fixed-size parts +# Interleave offsets of variable-size parts with fixed-size parts offsets = [sum(fixed_lengths + variable_lengths[:i]) for i in range(len(value))] fixed_parts = [part if part != None else serialize(offsets[i]) for i, part in enumerate(fixed_parts)] -# Return the fixed-size parts (with offsets interleaved) followed by the variable-size parts +# Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts return "".join(fixed_parts + variable_parts) ``` From 7255b0fc0d63ae891910228cfaf635ea66258f7d Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:25:47 +1000 Subject: [PATCH 285/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 1af409c58..cf482c39d 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -84,8 +84,8 @@ variable_lengths = [len(part) for part in variable_parts] assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) # Interleave offsets of variable-size parts with fixed-size parts -offsets = [sum(fixed_lengths + variable_lengths[:i]) for i in range(len(value))] -fixed_parts = [part if part != None else serialize(offsets[i]) for i, part in enumerate(fixed_parts)] +offsets = [serialize(sum(fixed_lengths + variable_lengths[:i])) for i in range(len(value))] +fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts return "".join(fixed_parts + variable_parts) From 59f568073afeb7723ba33fc8fd601375adc5d205 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:26:44 +1000 Subject: [PATCH 286/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index cf482c39d..03e47deb2 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -84,8 +84,8 @@ variable_lengths = [len(part) for part in variable_parts] assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE) # Interleave offsets of variable-size parts with fixed-size parts -offsets = [serialize(sum(fixed_lengths + variable_lengths[:i])) for i in range(len(value))] -fixed_parts = [part if part != None else offsets[i] for i, part in enumerate(fixed_parts)] +variable_offsets = [serialize(sum(fixed_lengths + variable_lengths[:i])) for i in range(len(value))] +fixed_parts = [part if part != None else variable_offsets[i] for i, part in enumerate(fixed_parts)] # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts return "".join(fixed_parts + variable_parts) From 62ffb897ae6949a0acb372be0e31e36dd3f16dd0 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:41:48 +1000 Subject: [PATCH 287/481] Update simple-serialize.md --- specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 03e47deb2..3f02335db 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -12,7 +12,7 @@ This is a **work in progress** describing typing, serialization and 
Merkleizatio - [Serialization](#serialization) - [`"uintN"`](#uintn) - [`"bool"`](#bool) - - [Vectors, containers, lists](#vectors-containers-lists) + - [Containers, vectors, lists](#containers-vectors-lists) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Self-signed containers](#self-signed-containers) @@ -71,7 +71,7 @@ assert value in (True, False) return b"\x01" if value is True else b"\x00" ``` -### Vectors, containers, lists +### Containers, vectors, lists ```python # Reccursively serialize From 30fe6f5657715f71510707f987d74078436a971c Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 00:47:14 +1000 Subject: [PATCH 288/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index dfa26c86d..2737d3055 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1867,9 +1867,9 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: - start_epoch = get_previous_epoch(state) + previous_epoch = get_previous_epoch(state) next_epoch = get_current_epoch(state) + 1 - for slot in range(get_epoch_start_slot(start_epoch), get_epoch_start_slot(next_epoch)): + for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_root, participants = get_winning_root_and_participants(state, shard) participating_balance = get_total_balance(state, participants) From 2fceb36f145fe8590bdab0fd15379f5f82f06842 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Sat, 13 Apr 2019 17:01:33 -0500 Subject: [PATCH 289/481] Update README.md --- specs/test_formats/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 2c1ef2d03..3271f55f8 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -84,7 +84,7 @@ The aim is to provide clients with a well-defined scope of work to run a particu - Clients that are complete are expected to contribute to testing, seeking for better resources to get conformance with the spec, and other clients. - Clients that are not complete in functionality can choose to ignore suites that use certain test-runners, or specific handlers of these test-runners. -- Clients that are on older versions can test there work based on older releases of the generated tests, and catch up with newer releases when possible. +- Clients that are on older versions can test their work based on older releases of the generated tests, and catch up with newer releases when possible. 
## Test Suite From eafcab7e58bdf5045eb001fcc80f9b7fa8f69d45 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 14 Apr 2019 08:24:54 +1000 Subject: [PATCH 290/481] check crosslinks validity root against previous --- specs/core/0_beacon-chain.md | 4 ++-- tests/phase0/epoch_processing/test_process_crosslinks.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 70b5bac9b..c1dc9de48 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1920,8 +1920,8 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: # do not count as success if winning_root did not or cannot form a chain attempted_crosslink = Crosslink(epoch=slot_to_epoch(slot), crosslink_data_root=winning_root, previous_crosslink_root=previous_crosslink_root) - current_crosslink_root = hash_tree_root(state.current_crosslinks[shard]) - if not current_crosslink_root in {previous_crosslink_root, hash_tree_root(attempted_crosslink) }: + actual_crosslink_root = hash_tree_root(state.previous_crosslinks[shard]) + if not actual_crosslink_root in {previous_crosslink_root, hash_tree_root(attempted_crosslink)}: participants = [] participating_balance = get_total_balance(state, participants) diff --git a/tests/phase0/epoch_processing/test_process_crosslinks.py b/tests/phase0/epoch_processing/test_process_crosslinks.py index 06dc07d85..5f080f6f4 100644 --- a/tests/phase0/epoch_processing/test_process_crosslinks.py +++ b/tests/phase0/epoch_processing/test_process_crosslinks.py @@ -81,10 +81,15 @@ def test_single_crosslink_update_from_previous_epoch(state): assert len(state.previous_epoch_attestations) == 1 pre_state, post_state = run_process_crosslinks(state) + crosslink_deltas = get_crosslink_deltas(state) shard = attestation.data.shard assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard] assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] + # ensure rewarded + slot = attestation.data.slot + assert crosslink_deltas[0][slot % spec.SLOTS_PER_EPOCH] > 0 + assert crosslink_deltas[1][slot % spec.SLOTS_PER_EPOCH] == 0 return pre_state, post_state From f7c5b0a1c615bc71bda71b71ed6ab5a5ac38b0b5 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 14 Apr 2019 08:30:13 +1000 Subject: [PATCH 291/481] set activation_eligibility_epoch during process_deposit --- specs/core/0_beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index bdce2e348..67eb7b000 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1462,7 +1462,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), latest_start_shard=GENESIS_START_SHARD, - + # Exit queue exit_epoch=GENESIS_EPOCH, exit_queue_filled=0, @@ -2271,6 +2271,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: validator = Validator( pubkey=pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, + activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, From 37004404d04e0e6d4d7eec95e7097e844fa40106 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 14 Apr 2019 09:13:53 +1000 Subject: [PATCH 292/481] add exit queue test --- specs/core/0_beacon-chain.md | 29 ++-- 
.../block_processing/test_process_deposit.py | 4 +- .../block_processing/test_voluntary_exit.py | 155 ++++++++++-------- 3 files changed, 106 insertions(+), 82 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 67eb7b000..40363a666 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1311,23 +1311,26 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: Note that this function mutates ``state``. """ validator = state.validator_registry[index] + # Operation is a no-op if validator is already in the queue - if validator.exit_epoch == FAR_FUTURE_EPOCH: - # Update exit queue counters - delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - if state.exit_epoch < delayed_activation_exit_epoch: - state.exit_epoch = delayed_activation_exit_epoch + if validator.exit_epoch != FAR_FUTURE_EPOCH: + return - if state.exit_queue_filled >= MAX_EXITS_PER_EPOCH: - state.exit_epoch += 1 - state.exit_queue_filled = 0 + # Update exit queue counters + delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + if state.exit_epoch < delayed_activation_exit_epoch: + state.exit_epoch = delayed_activation_exit_epoch - # Set validator exit epoch and withdrawable epoch - validator.exit_epoch = state.exit_epoch - validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + if state.exit_queue_filled >= MAX_EXITS_PER_EPOCH: + state.exit_epoch += 1 + state.exit_queue_filled = 0 - # Extend queue - state.exit_queue_filled += 1 + # Set validator exit epoch and withdrawable epoch + validator.exit_epoch = state.exit_epoch + validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY + + # Extend queue + state.exit_queue_filled += 1 ``` #### `slash_validator` diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py index 0726dddef..0c3447d4e 100644 --- a/tests/phase0/block_processing/test_process_deposit.py +++ b/tests/phase0/block_processing/test_process_deposit.py @@ -15,8 +15,8 @@ from tests.phase0.helpers import ( ) -# mark entire file as 'voluntary_exits' -pytestmark = pytest.mark.voluntary_exits +# mark entire file as 'deposits' +pytestmark = pytest.mark.deposits def test_success(state): diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index 7627f1f0b..2f0693454 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -18,124 +18,145 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.voluntary_exits -def test_success(state): - pre_state = deepcopy(state) - # - # setup pre_state - # - # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit - pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH +def run_voluntary_exit_processing(state, voluntary_exit, valid=True): + """ + Run ``process_voluntary_exit`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + post_state = deepcopy(state) - # - # build voluntary exit - # - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] + if not valid: + with pytest.raises(AssertionError): + process_voluntary_exit(post_state, voluntary_exit) + return state, None + + process_voluntary_exit(post_state, voluntary_exit) + + validator_index = voluntary_exit.validator_index + assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH + + return state, post_state + + +def test_success(state): + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( - pre_state, + state, current_epoch, validator_index, privkey, ) - post_state = deepcopy(pre_state) + pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit) + return pre_state, voluntary_exit, post_state - # - # test valid exit - # - process_voluntary_exit(post_state, voluntary_exit) - assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH - assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH +def test_success_exit_queue(state): + # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit + state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + current_epoch = get_current_epoch(state) + + # exit `MAX_EXITS_PER_EPOCH` + initial_indices = get_active_validator_indices(state.validator_registry,current_epoch)[:spec.MAX_EXITS_PER_EPOCH] + post_state = state + for index in initial_indices: + privkey = pubkey_to_privkey[state.validator_registry[index].pubkey] + voluntary_exit = build_voluntary_exit( + state, + current_epoch, + index, + privkey, + ) + + _, post_state = run_voluntary_exit_processing(post_state, voluntary_exit) + + # exit an additional validator + validator_index = get_active_validator_indices(state.validator_registry,current_epoch)[-1] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] + voluntary_exit = build_voluntary_exit( + state, + current_epoch, + validator_index, + privkey, + ) + + pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit) + + assert ( + post_state.validator_registry[validator_index].exit_epoch == + post_state.validator_registry[initial_indices[0]].exit_epoch + 1 + ) return pre_state, voluntary_exit, post_state def test_validator_not_active(state): - pre_state = deepcopy(state) - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - # - # setup pre_state - # - 
pre_state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH + state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH # # build and test voluntary exit # voluntary_exit = build_voluntary_exit( - pre_state, + state, current_epoch, validator_index, privkey, ) - with pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None + pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False) + return pre_state, voluntary_exit, post_state def test_validator_already_exited(state): - pre_state = deepcopy(state) - # - # setup pre_state - # # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit - pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] # but validator already has exited - pre_state.validator_registry[validator_index].exit_epoch = current_epoch + 2 + state.validator_registry[validator_index].exit_epoch = current_epoch + 2 - # - # build voluntary exit - # voluntary_exit = build_voluntary_exit( - pre_state, + state, current_epoch, validator_index, privkey, ) - with pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None + pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False) + return pre_state, voluntary_exit, post_state def test_validator_not_active_long_enough(state): - pre_state = deepcopy(state) - # - # setup pre_state - # - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] - # - # build voluntary exit - # voluntary_exit = build_voluntary_exit( - pre_state, + state, current_epoch, validator_index, privkey, ) assert ( - current_epoch - pre_state.validator_registry[validator_index].activation_epoch < + current_epoch - state.validator_registry[validator_index].activation_epoch < spec.PERSISTENT_COMMITTEE_PERIOD ) - with pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None + pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False) + return pre_state, voluntary_exit, post_state From bade9ff3edcd3529a2456b6a62a4fc4102b928c8 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 14 Apr 2019 09:21:29 +1000 Subject: [PATCH 293/481] enhance exit queue test --- tests/phase0/block_processing/test_voluntary_exit.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index 2f0693454..bddf874de 
100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -76,7 +76,11 @@ def test_success_exit_queue(state): privkey, ) - _, post_state = run_voluntary_exit_processing(post_state, voluntary_exit) + pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit) + assert post_state.exit_queue_filled > pre_state.exit_queue_filled + assert post_state.exit_epoch >= pre_state.exit_epoch + + assert post_state.exit_epoch == pre_state.exit_epoch # exit an additional validator validator_index = get_active_validator_indices(state.validator_registry,current_epoch)[-1] @@ -94,6 +98,9 @@ def test_success_exit_queue(state): post_state.validator_registry[validator_index].exit_epoch == post_state.validator_registry[initial_indices[0]].exit_epoch + 1 ) + assert post_state.exit_queue_filled == 1 + assert post_state.exit_epoch == pre_state.exit_epoch + 1 + return pre_state, voluntary_exit, post_state From 5466a4875d894e2ca9ca9d8ee60d63026be5062c Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Sun, 14 Apr 2019 09:54:35 +1000 Subject: [PATCH 294/481] Update 1_shard-data-chains.md Fix some typos --- specs/core/1_shard-data-chains.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index dc990567d..6a2094688 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -317,8 +317,8 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], assert len(block.attestations) <= MAX_SHARD_ATTESTIONS for _, attestation in enumerate(block.attestations): assert max(GENESIS_SHARD_SLOT, block.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attesation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY - assert attetation.data.shart == block.shard + assert attestation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY + assert attestation.data.shard == block.shard verify_shard_attestation_signature(beacon_state, attestation) # Check signature From 705b553139c5e8cb523a62010e76850a85054b68 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 14 Apr 2019 12:11:50 +1000 Subject: [PATCH 295/481] Fix --- specs/light_client/sync_protocol.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 7db02050e..795c057ab 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -43,7 +43,7 @@ We add a data type `PeriodData` and four helpers: { 'validator_count': 'uint64', 'seed': 'bytes32', - 'committee': [Validator] + 'committee': [Validator], } ``` @@ -94,8 +94,7 @@ Here is a helper to compute the committee at a slot given the maximal earlier an ```python def compute_committee(header: BeaconBlockHeader, - validator_memory: ValidatorMemory): - + validator_memory: ValidatorMemory) -> List[ValidatorIndex]: earlier_validator_count = validator_memory.earlier_period_data.validator_count later_validator_count = validator_memory.later_period_data.validator_count maximal_earlier_committee = validator_memory.earlier_period_data.committee @@ -108,11 +107,13 @@ def compute_committee(header: BeaconBlockHeader, earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), ) + 1 - - def get_offset(count, end:bool): - return get_split_offset(count, - SHARD_COUNT * committee_count, - 
validator_memory.shard_id * committee_count + (1 if end else 0)) + + def get_offset(count: int, end: bool) -> int: + return get_split_offset( + count, + SHARD_COUNT * committee_count, + validator_memory.shard_id * committee_count + (1 if end else 0), + ) actual_earlier_committee = maximal_earlier_committee[ 0:get_offset(earlier_validator_count, True) - get_offset(earlier_validator_count, False) @@ -132,7 +133,6 @@ def compute_committee(header: BeaconBlockHeader, [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] ))) - ``` Note that this method makes use of the fact that the committee for any given shard always starts and ends at the same validator index independently of the committee count (this is because the validator set is split into `SHARD_COUNT * committee_count` slices but the first slice of a shard is a multiple `committee_count * i`, so the start of the slice is `n * committee_count * i // (SHARD_COUNT * committee_count) = n * i // SHARD_COUNT`, using the slightly nontrivial algebraic identity `(x * a) // ab == x // b`). From 5ed4cb29f6b62887b42db4a4651389594805d9e5 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 14 Apr 2019 12:15:24 +1000 Subject: [PATCH 296/481] ValidatorMemory --- specs/light_client/sync_protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 795c057ab..985796863 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -75,7 +75,7 @@ A light client will keep track of: * `later_period_data = get_period_data(finalized_header, shard_id, later=True)` * `earlier_period_data = get_period_data(finalized_header, shard_id, later=False)` -We use the struct `validator_memory` to keep track of these variables. +We use the struct `ValidatorMemory` to keep track of these variables. 
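The boundary-independence property used by `compute_committee` above (the identity `(x * a) // (a * b) == x // b`) can be checked numerically. This is an illustrative check only, assuming `get_split_offset(list_size, chunks, index)` is the usual `list_size * index // chunks` helper; the example numbers are arbitrary.

```python
# The start offset of a shard's slice does not depend on committee_count.
def get_split_offset(list_size: int, chunks: int, index: int) -> int:
    return list_size * index // chunks  # assumed definition

SHARD_COUNT = 1024
n, shard_id = 500000, 7  # arbitrary validator count and shard
offsets = {
    get_split_offset(n, SHARD_COUNT * c, shard_id * c)
    for c in (1, 2, 3, 5, 8)  # different committee counts
}
assert offsets == {n * shard_id // SHARD_COUNT}
```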
### Updating the shuffled committee From f85e7ac44737b76899c1757689febd56775d9060 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 13 Apr 2019 22:14:05 -0500 Subject: [PATCH 297/481] Added churn limit logic --- specs/core/0_beacon-chain.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 40363a666..6d0d84e12 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -93,6 +93,7 @@ - [`is_surround_vote`](#is_surround_vote) - [`integer_squareroot`](#integer_squareroot) - [`get_delayed_activation_exit_epoch`](#get_delayed_activation_exit_epoch) + - [`get_churn_limit`](#get_churn_limit) - [`bls_verify`](#bls_verify) - [`bls_verify_multiple`](#bls_verify_multiple) - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) @@ -182,8 +183,7 @@ These configurations are updated for releases, but may be out of sync during `de | `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | -| `MAX_EXITS_PER_EPOCH` | `2**2` (= 4) | -| `MAX_ACTIVATIONS_PER_FINALIZED_EPOCH` | `2**2` (= 4) | +| `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `SHUFFLE_ROUND_COUNT` | 90 | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) @@ -233,6 +233,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | | `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours | +| `MAX_FULL_CHURN_EPOCHS` | `2**22` (= 4,194,304) | epochs | ~9 months | * `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` @@ -1269,6 +1270,15 @@ def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: return epoch + 1 + ACTIVATION_EXIT_DELAY ``` +### `get_churn_limit` + +```python +def get_churn_limit(state: BeaconState) -> int: + return max( + MIN_PER_EPOCH_CHURN_LIMIT, + MAX_FULL_CHURN_EPOCHS // len(get_active_validators(state, get_current_epoch(state))) + ) + ### `bls_verify` `bls_verify` is a function for verifying a BLS signature, defined in the [BLS Signature spec](../bls_signature.md#bls_verify). 
@@ -1321,7 +1331,7 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: if state.exit_epoch < delayed_activation_exit_epoch: state.exit_epoch = delayed_activation_exit_epoch - if state.exit_queue_filled >= MAX_EXITS_PER_EPOCH: + if state.exit_queue_filled >= get_churn_limit(state): state.exit_epoch += 1 state.exit_queue_filled = 0 @@ -1962,7 +1972,7 @@ def update_registry(state: BeaconState) -> None: validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch) ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) - for index in activation_queue[:MAX_ACTIVATIONS_PER_FINALIZED_EPOCH]: + for index in activation_queue[:get_churn_limit(state)]: activate_validator(state, index, is_genesis=False) state.latest_start_shard = ( From 0d6448303d916888c2ba43e1ff0d0358c5829bc0 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 16:49:17 +1000 Subject: [PATCH 298/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6d0d84e12..c86e25ea9 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1278,6 +1278,7 @@ def get_churn_limit(state: BeaconState) -> int: MIN_PER_EPOCH_CHURN_LIMIT, MAX_FULL_CHURN_EPOCHS // len(get_active_validators(state, get_current_epoch(state))) ) +``` ### `bls_verify` From d01fb80fd4a5f704269aeedb6cbca0d90971cb4d Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 17:02:04 +1000 Subject: [PATCH 299/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c86e25ea9..dbce308ad 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -595,8 +595,8 @@ The types are defined topologically to aid in facilitating an executable version 'latest_start_shard': 'uint64', # Exit queue - 'exit_epoch': 'uint64', - 'exit_queue_filled': 'uint64', + 'exit_queue_epoch': 'uint64', + 'exit_queue_churn': 'uint64', # Finality 'previous_epoch_attestations': [PendingAttestation], @@ -1329,19 +1329,20 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: # Update exit queue counters delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - if state.exit_epoch < delayed_activation_exit_epoch: - state.exit_epoch = delayed_activation_exit_epoch + if state.exit_queue_epoch < delayed_activation_exit_epoch: + state.exit_queue_epoch = delayed_activation_exit_epoch + state.exit_queue_churn = 0 - if state.exit_queue_filled >= get_churn_limit(state): - state.exit_epoch += 1 - state.exit_queue_filled = 0 + state.exit_queue_churn += 1 + if state.exit_queue_churn > get_churn_limit(state): + state.exit_queue_epoch += 1 + state.exit_queue_churn = 0 # Set validator exit epoch and withdrawable epoch - validator.exit_epoch = state.exit_epoch + validator.exit_epoch = state.exit_queue_epoch validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY # Extend queue - state.exit_queue_filled += 1 ``` #### `slash_validator` @@ -1478,8 +1479,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], latest_start_shard=GENESIS_START_SHARD, # Exit queue - exit_epoch=GENESIS_EPOCH, - exit_queue_filled=0, + exit_queue_epoch=GENESIS_EPOCH, + exit_queue_churn=0, # Finality 
previous_epoch_attestations=[], From 15bb9676d5b3dd4cace31058916f071aef8cfcad Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 17:04:36 +1000 Subject: [PATCH 300/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index dbce308ad..78aade7cb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1276,7 +1276,7 @@ def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: def get_churn_limit(state: BeaconState) -> int: return max( MIN_PER_EPOCH_CHURN_LIMIT, - MAX_FULL_CHURN_EPOCHS // len(get_active_validators(state, get_current_epoch(state))) + MAX_FULL_CHURN_EPOCHS // len(get_active_validator_indices(state, get_current_epoch(state))) ) ``` From f7d3e02eb254749913e3ed652b890983291a153a Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 14 Apr 2019 17:17:09 +1000 Subject: [PATCH 301/481] Add ToC --- specs/light_client/merkle_proofs.md | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 371f0ffde..46fa23f82 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -1,12 +1,26 @@ **NOTICE**: This document is a work-in-progress for researchers and implementers. -### Constants +## Table of Contents + + +- [Table of Contents](#table-of-contents) +- [Constants](#constants) +- [Generalized Merkle tree index](#generalized-merkle-tree-index) +- [SSZ object to index](#ssz-object-to-index) +- [Merkle multiproofs](#merkle-multiproofs) +- [MerklePartial](#merklepartial) + - [`SSZMerklePartial`](#sszmerklepartial) + - [Proofs for execution](#proofs-for-execution) + + + +## Constants | Name | Value | | - | - | | `LENGTH_FLAG` | `2**64 - 1` | -### Generalized Merkle tree index +## Generalized Merkle tree index In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows: @@ -29,7 +43,7 @@ def merkle_tree(leaves: List[Bytes32]) -> List[Bytes32]: We will define Merkle proofs in terms of generalized indices. -### SSZ object to index +## SSZ object to index We can describe the hash tree of any SSZ object, rooted in `hash_tree_root(object)`, as a binary Merkle tree whose depth may vary. For example, an object `{x: bytes32, y: List[uint64]}` would look as follows: @@ -86,7 +100,7 @@ def get_generalized_indices(obj: Any, path: List[int], root: int=1) -> List[int] raise Exception("Unknown type / path") ``` -### Merkle multiproofs +## Merkle multiproofs We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (ie. 
generalized indices 8, 9, 14): @@ -147,11 +161,11 @@ def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], return (indices == []) or (1 in tree and tree[1] == root) ``` -### MerklePartial +## MerklePartial We define: -#### `SSZMerklePartial` +### `SSZMerklePartial` ```python @@ -163,7 +177,7 @@ We define: } ``` -#### Proofs for execution +### Proofs for execution We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `SSZMerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`. From 7705ecf89cd240152576527bf290b5a3daeea0d9 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 17:28:45 +1000 Subject: [PATCH 302/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 78aade7cb..6ec3ca739 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -184,6 +184,7 @@ These configurations are updated for releases, but may be out of sync during `de | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | | `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | +| `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | | `SHUFFLE_ROUND_COUNT` | 90 | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) @@ -237,7 +238,6 @@ These configurations are updated for releases, but may be out of sync during `de * `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` - ### State list lengths | Name | Value | Unit | Duration | @@ -746,11 +746,11 @@ def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: ### `get_active_validator_indices` ```python -def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]: +def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex]: """ - Get indices of active validators from ``validators``. + Get active validator indices at ``epoch``. """ - return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] + return [i for i, v in enumerate(state.validator_registry) if is_active_validator(v, epoch)] ``` ### `get_balance` @@ -844,7 +844,7 @@ def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: """ Return the number of committees in one epoch. 
""" - active_validators = get_active_validator_indices(state.validator_registry, epoch) + active_validators = get_active_validator_indices(state, epoch) return max( 1, min( @@ -896,10 +896,7 @@ def get_crosslink_committees_at_slot(state: BeaconState, next_epoch = current_epoch + 1 assert previous_epoch <= epoch <= next_epoch - indices = get_active_validator_indices( - state.validator_registry, - epoch, - ) + indices = get_active_validator_indices(state, epoch) if epoch == current_epoch: start_shard = state.latest_start_shard @@ -1276,7 +1273,7 @@ def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: def get_churn_limit(state: BeaconState) -> int: return max( MIN_PER_EPOCH_CHURN_LIMIT, - MAX_FULL_CHURN_EPOCHS // len(get_active_validator_indices(state, get_current_epoch(state))) + len(get_active_validator_indices(state, get_current_epoch(state))) // CHURN_LIMIT_QUOTIENT ) ``` @@ -1517,7 +1514,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: activate_validator(state, validator_index, is_genesis=True) - genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) + genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): state.latest_active_index_roots[index] = genesis_active_index_root @@ -1651,12 +1648,12 @@ We define some helper functions utilized when processing an epoch transition: ```python def get_current_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_current_epoch(state))) + return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state))) ``` ```python def get_previous_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_previous_epoch(state))) + return get_total_balance(state, get_active_validator_indices(state, get_previous_epoch(state))) ``` ```python @@ -1994,7 +1991,7 @@ def process_slashings(state: BeaconState) -> None: Note that this function mutates ``state``. 
""" current_epoch = get_current_epoch(state) - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) + active_validator_indices = get_active_validator_indices(state, current_epoch) total_balance = get_total_balance(state, active_validator_indices) # Compute `total_penalties` @@ -2022,7 +2019,7 @@ def finish_epoch_update(state: BeaconState) -> None: # Set active index root index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH state.latest_active_index_roots[index_root_position] = hash_tree_root( - get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY) + get_active_validator_indices(state, next_epoch + ACTIVATION_EXIT_DELAY) ) # Set total slashed balances state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = ( From da4a1430eaaa6df49a3231ca94b515abeba1dec8 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 14 Apr 2019 17:52:50 +1000 Subject: [PATCH 303/481] fix test --- .../block_processing/test_voluntary_exit.py | 23 ++++++++++--------- tests/phase0/helpers.py | 2 +- tests/phase0/test_sanity.py | 14 +++++------ 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index bddf874de..b8af85a97 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -5,6 +5,7 @@ import build.phase0.spec as spec from build.phase0.spec import ( get_active_validator_indices, + get_churn_limit, get_current_epoch, process_voluntary_exit, ) @@ -44,7 +45,7 @@ def test_success(state): state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + validator_index = get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( @@ -65,7 +66,7 @@ def test_success_exit_queue(state): current_epoch = get_current_epoch(state) # exit `MAX_EXITS_PER_EPOCH` - initial_indices = get_active_validator_indices(state.validator_registry,current_epoch)[:spec.MAX_EXITS_PER_EPOCH] + initial_indices = get_active_validator_indices(state, current_epoch)[:get_churn_limit(state)] post_state = state for index in initial_indices: privkey = pubkey_to_privkey[state.validator_registry[index].pubkey] @@ -77,13 +78,13 @@ def test_success_exit_queue(state): ) pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit) - assert post_state.exit_queue_filled > pre_state.exit_queue_filled - assert post_state.exit_epoch >= pre_state.exit_epoch + assert post_state.exit_queue_churn > pre_state.exit_queue_churn + assert post_state.exit_queue_epoch >= pre_state.exit_queue_epoch - assert post_state.exit_epoch == pre_state.exit_epoch + assert post_state.exit_queue_epoch == pre_state.exit_queue_epoch # exit an additional validator - validator_index = get_active_validator_indices(state.validator_registry,current_epoch)[-1] + validator_index = get_active_validator_indices(state, current_epoch)[-1] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( state, @@ -98,8 +99,8 @@ def test_success_exit_queue(state): post_state.validator_registry[validator_index].exit_epoch == 
post_state.validator_registry[initial_indices[0]].exit_epoch + 1 ) - assert post_state.exit_queue_filled == 1 - assert post_state.exit_epoch == pre_state.exit_epoch + 1 + assert post_state.exit_queue_churn == 0 + assert post_state.exit_queue_epoch == pre_state.exit_queue_epoch + 1 return pre_state, voluntary_exit, post_state @@ -107,7 +108,7 @@ def test_success_exit_queue(state): def test_validator_not_active(state): current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + validator_index = get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH @@ -131,7 +132,7 @@ def test_validator_already_exited(state): state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + validator_index = get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] # but validator already has exited @@ -150,7 +151,7 @@ def test_validator_already_exited(state): def test_validator_not_active_long_enough(state): current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[0] + validator_index = get_active_validator_indices(state, current_epoch)[0] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] voluntary_exit = build_voluntary_exit( diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 5fe22e6a4..8c8064fc1 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -199,7 +199,7 @@ def build_deposit(state, def get_valid_proposer_slashing(state): current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[-1] + validator_index = get_active_validator_indices(state, current_epoch)[-1] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] slot = state.slot diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 9b2bf9c7f..f9e62620c 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -278,7 +278,7 @@ def test_attestation(state): def test_voluntary_exit(state): pre_state = deepcopy(state) validator_index = get_active_validator_indices( - pre_state.validator_registry, + pre_state, get_current_epoch(pre_state) )[-1] @@ -326,7 +326,7 @@ def test_voluntary_exit(state): def test_no_exit_churn_too_long_since_change(state): pre_state = deepcopy(state) validator_index = get_active_validator_indices( - pre_state.validator_registry, + pre_state, get_current_epoch(pre_state) )[-1] @@ -346,8 +346,8 @@ def test_no_exit_churn_too_long_since_change(state): state_transition(post_state, block) assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH - assert post_state.exit_queue_filled == pre_state.exit_queue_filled - assert post_state.exit_epoch == pre_state.exit_epoch + assert post_state.exit_queue_churn == pre_state.exit_queue_churn + assert post_state.exit_queue_epoch == pre_state.exit_queue_epoch return pre_state, [block], post_state @@ -355,8 +355,8 @@ def test_no_exit_churn_too_long_since_change(state): def test_transfer(state): pre_state = deepcopy(state) current_epoch = 
get_current_epoch(pre_state) - sender_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1] - recipient_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + sender_index = get_active_validator_indices(pre_state, current_epoch)[-1] + recipient_index = get_active_validator_indices(pre_state, current_epoch)[0] transfer_pubkey = pubkeys[-1] transfer_privkey = privkeys[-1] amount = get_balance(pre_state, sender_index) @@ -407,7 +407,7 @@ def test_balance_driven_status_transitions(state): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1] + validator_index = get_active_validator_indices(pre_state, current_epoch)[-1] assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH From 229af3dedacfd5a7abf6f005cd59e35d5f20d338 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 18:10:44 +1000 Subject: [PATCH 304/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6ec3ca739..3f92e8b6f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -594,10 +594,6 @@ The types are defined topologically to aid in facilitating an executable version 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], 'latest_start_shard': 'uint64', - # Exit queue - 'exit_queue_epoch': 'uint64', - 'exit_queue_churn': 'uint64', - # Finality 'previous_epoch_attestations': [PendingAttestation], 'current_epoch_attestations': [PendingAttestation], @@ -1324,19 +1320,21 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: if validator.exit_epoch != FAR_FUTURE_EPOCH: return - # Update exit queue counters - delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - if state.exit_queue_epoch < delayed_activation_exit_epoch: - state.exit_queue_epoch = delayed_activation_exit_epoch - state.exit_queue_churn = 0 + # Compute exit queue parameters + exit_queue_epoch = sorted([validator.exit_epoch for validator in state.validator_registry if + validator.exit_epoch != FAR_FUTURE_EPOCH + ].append(GENESIS_EPOCH), key=lambda index: state.validator_registry[index].exit_epoch)[-1] - state.exit_queue_churn += 1 - if state.exit_queue_churn > get_churn_limit(state): - state.exit_queue_epoch += 1 - state.exit_queue_churn = 0 + delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + if exit_queue_epoch < delayed_activation_exit_epoch: + exit_queue_epoch = delayed_activation_exit_epoch + + exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch]) + if exit_queue_churn > get_churn_limit(state): + exit_queue_epoch += 1 # Set validator exit epoch and withdrawable epoch - validator.exit_epoch = state.exit_queue_epoch + validator.exit_epoch = exit_queue_epoch validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY # Extend queue @@ -1475,10 +1473,6 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), latest_start_shard=GENESIS_START_SHARD, - # Exit queue - exit_queue_epoch=GENESIS_EPOCH, - exit_queue_churn=0, - # Finality previous_epoch_attestations=[], 
current_epoch_attestations=[], From 2f2e7847de10e4b424b885108936f5aaacca4dac Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 14 Apr 2019 18:13:43 +1000 Subject: [PATCH 305/481] More fix --- specs/light_client/sync_protocol.md | 63 ++++++++++++++++++++--------- 1 file changed, 43 insertions(+), 20 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 985796863..3850f077d 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -9,6 +9,13 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers - [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing) - [Table of Contents](#table-of-contents) - [Preliminaries](#preliminaries) + - [Expansions](#expansions) + - [`get_active_validator_indices`](#get_active_validator_indices) + - [`MerklePartial`](#merklepartial) + - [`PeriodData`](#perioddata) + - [`get_earlier_start_epoch`](#get_earlier_start_epoch) + - [`get_later_start_epoch`](#get_later_start_epoch) + - [`get_period_data`](#get_period_data) - [Light client state](#light-client-state) - [Updating the shuffled committee](#updating-the-shuffled-committee) - [Computing the current committee](#computing-the-current-committee) @@ -16,28 +23,34 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers - ## Preliminaries +### Expansions + We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (eg. `BeaconBlockHeader` is a summary of `BeaconBlock`). We define two expansions: -* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState` -* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])` +* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])`. +* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState`. + +### `get_active_validator_indices` Note that there is now a new way to compute `get_active_validator_indices`: ```python -def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex]: +def get_active_validator_indices(state: ExtendedBeaconState, epoch: Epoch) -> List[ValidatorIndex]: return state.latest_active_indices[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] ``` Note that it takes `state` instead of `state.validator_registry` as an argument. 
This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments. + +### `MerklePartial` + A `MerklePartial(f, *args)` is an object that contains a minimal Merkle proof needed to compute `f(*args)`. A `MerklePartial` can be used in place of a regular SSZ object, though a computation would return an error if it attempts to access part of the object that is not contained in the proof. -We add a data type `PeriodData` and four helpers: +### `PeriodData` ```python { @@ -47,13 +60,23 @@ We add a data type `PeriodData` and four helpers: } ``` +### `get_earlier_start_epoch` + ```python def get_earlier_start_epoch(slot: Slot) -> int: return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD * 2 - +``` + +### `get_later_start_epoch` + +```python def get_later_start_epoch(slot: Slot) -> int: return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD - +``` + +### `get_period_data` + +```python def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) -> PeriodData: period_start = get_later_start_epoch(header.slot) if later else get_earlier_start_epoch(header.slot) validator_count = len(get_active_validator_indices(state, period_start)) @@ -62,7 +85,7 @@ def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) -> return PeriodData( validator_count, generate_seed(block.state, period_start), - [block.state.validator_registry[i] for i in indices] + [block.state.validator_registry[i] for i in indices], ) ``` @@ -114,7 +137,7 @@ def compute_committee(header: BeaconBlockHeader, SHARD_COUNT * committee_count, validator_memory.shard_id * committee_count + (1 if end else 0), ) - + actual_earlier_committee = maximal_earlier_committee[ 0:get_offset(earlier_validator_count, True) - get_offset(earlier_validator_count, False) ] @@ -123,15 +146,15 @@ def compute_committee(header: BeaconBlockHeader, ] def get_switchover_epoch(index): return ( - bytes_to_int(hash(validator_memory.earlier_period_data.seed + bytes3(index))[0:8]) % + bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD ) # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated return sorted(list(set( - [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + - [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] + [i for i in actual_earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + + [i for i in actual_later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] ))) ``` @@ -154,23 +177,23 @@ The verification procedure is as follows: ```python def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: ValidatorMemory) -> bool: - assert proof.shard_parent_block.beacon_chain_ref == hash_tree_root(proof.header) + assert proof.shard_parent_block.beacon_chain_root == hash_tree_root(proof.header) committee = compute_committee(proof.header, validator_memory) # Verify that we have >=50% support - support_balance = sum([c.high_balance for i, c in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True]) - total_balance = sum([c.high_balance for i, c in enumerate(committee)] + support_balance = sum([v.high_balance for i, v in enumerate(committee) 
if get_bitfield_bit(proof.shard_bitfield, i) is True]) + total_balance = sum([v.high_balance for i, v in enumerate(committee)]) assert support_balance * 2 > total_balance # Verify shard attestations group_public_key = bls_aggregate_pubkeys([ - v.pubkey for v, index in enumerate(committee) if - get_bitfield_bit(proof.shard_bitfield, i) is True + v.pubkey for v, index in enumerate(committee) + if get_bitfield_bit(proof.shard_bitfield, index) is True ]) assert bls_verify( pubkey=group_public_key, message_hash=hash_tree_root(shard_parent_block), - signature=shard_aggregate_signature, - domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER) + signature=proof.shard_aggregate_signature, + domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER), ) ``` -The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_ref, ShardBlock)`, which would cut off ~220 bytes. +The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_root, ShardBlock)`, which would cut off ~220 bytes. From 02cfbca81f1da76d2eb92c934f9fdd81550d0566 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 14 Apr 2019 18:17:43 +1000 Subject: [PATCH 306/481] Remove blanks --- specs/light_client/sync_protocol.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 3850f077d..d5647f0c5 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -125,7 +125,7 @@ def compute_committee(header: BeaconBlockHeader, earlier_start_epoch = get_earlier_start_epoch(header.slot) later_start_epoch = get_later_start_epoch(header.slot) epoch = slot_to_epoch(header.slot) - + committee_count = max( earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), @@ -137,7 +137,7 @@ def compute_committee(header: BeaconBlockHeader, SHARD_COUNT * committee_count, validator_memory.shard_id * committee_count + (1 if end else 0), ) - + actual_earlier_committee = maximal_earlier_committee[ 0:get_offset(earlier_validator_count, True) - get_offset(earlier_validator_count, False) ] From 0b770121fec097611397802a1674c81602517678 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 18:23:30 +1000 Subject: [PATCH 307/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3f92e8b6f..0531f9807 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -234,7 +234,6 @@ These configurations are updated for releases, but may be out of sync during `de | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | | `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours | -| `MAX_FULL_CHURN_EPOCHS` | `2**22` (= 4,194,304) | epochs | ~9 months | * `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` From 06807cf5201e1facf8438bf5cdaa76ec0cdb8ff4 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Sun, 14 Apr 2019 18:50:05 +1000 Subject: [PATCH 308/481] fix tests and off 
by one error --- specs/core/0_beacon-chain.md | 11 ++++++----- tests/phase0/block_processing/test_voluntary_exit.py | 7 ------- tests/phase0/test_sanity.py | 2 -- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3f92e8b6f..352fccf76 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1320,17 +1320,18 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: if validator.exit_epoch != FAR_FUTURE_EPOCH: return - # Compute exit queue parameters - exit_queue_epoch = sorted([validator.exit_epoch for validator in state.validator_registry if - validator.exit_epoch != FAR_FUTURE_EPOCH - ].append(GENESIS_EPOCH), key=lambda index: state.validator_registry[index].exit_epoch)[-1] + # Compute exit queue parameters (pad with GENESIS_EPOCH in case empty) + exit_queue_epoch = sorted([ + validator.exit_epoch for validator in state.validator_registry + if validator.exit_epoch != FAR_FUTURE_EPOCH + ] + [GENESIS_EPOCH])[-1] delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) if exit_queue_epoch < delayed_activation_exit_epoch: exit_queue_epoch = delayed_activation_exit_epoch exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch]) - if exit_queue_churn > get_churn_limit(state): + if exit_queue_churn >= get_churn_limit(state): exit_queue_epoch += 1 # Set validator exit epoch and withdrawable epoch diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py index b8af85a97..eb01c2a8a 100644 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ b/tests/phase0/block_processing/test_voluntary_exit.py @@ -78,10 +78,6 @@ def test_success_exit_queue(state): ) pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit) - assert post_state.exit_queue_churn > pre_state.exit_queue_churn - assert post_state.exit_queue_epoch >= pre_state.exit_queue_epoch - - assert post_state.exit_queue_epoch == pre_state.exit_queue_epoch # exit an additional validator validator_index = get_active_validator_indices(state, current_epoch)[-1] @@ -99,9 +95,6 @@ def test_success_exit_queue(state): post_state.validator_registry[validator_index].exit_epoch == post_state.validator_registry[initial_indices[0]].exit_epoch + 1 ) - assert post_state.exit_queue_churn == 0 - assert post_state.exit_queue_epoch == pre_state.exit_queue_epoch + 1 - return pre_state, voluntary_exit, post_state diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index f9e62620c..08c7610c0 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -346,8 +346,6 @@ def test_no_exit_churn_too_long_since_change(state): state_transition(post_state, block) assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH - assert post_state.exit_queue_churn == pre_state.exit_queue_churn - assert post_state.exit_queue_epoch == pre_state.exit_queue_epoch return pre_state, [block], post_state From 0908ffa653a5a9569e1e8798add8c8c81500f3a1 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 19:01:53 +1000 Subject: [PATCH 309/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index cd8e1a427..ec12e4000 100644 --- a/specs/core/0_beacon-chain.md +++ 
b/specs/core/0_beacon-chain.md @@ -1313,22 +1313,15 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: Initiate the validator of the given ``index``. Note that this function mutates ``state``. """ + # Return if validator already initiated exit validator = state.validator_registry[index] - - # Operation is a no-op if validator is already in the queue if validator.exit_epoch != FAR_FUTURE_EPOCH: return - # Compute exit queue parameters (pad with GENESIS_EPOCH in case empty) - exit_queue_epoch = sorted([ - validator.exit_epoch for validator in state.validator_registry - if validator.exit_epoch != FAR_FUTURE_EPOCH - ] + [GENESIS_EPOCH])[-1] - - delayed_activation_exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) - if exit_queue_epoch < delayed_activation_exit_epoch: - exit_queue_epoch = delayed_activation_exit_epoch - + # Compute exit queue epoch + exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH] + latest_exit_epoch = GENESIS_EPOCH if len(exit_epochs) == 0 else sorted(exit_epochs)[-1] + exit_queue_epoch = max(latest_exit_epoch, get_delayed_activation_exit_epoch(get_current_epoch(state))) exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch]) if exit_queue_churn >= get_churn_limit(state): exit_queue_epoch += 1 @@ -1336,8 +1329,6 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: # Set validator exit epoch and withdrawable epoch validator.exit_epoch = exit_queue_epoch validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY - - # Extend queue ``` #### `slash_validator` From 875b2ba00dfd0b0b5eccd7d0bfb2833a9fd6c0d2 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 19:11:40 +1000 Subject: [PATCH 310/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ec12e4000..97abdbee7 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1320,8 +1320,8 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: # Compute exit queue epoch exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH] - latest_exit_epoch = GENESIS_EPOCH if len(exit_epochs) == 0 else sorted(exit_epochs)[-1] - exit_queue_epoch = max(latest_exit_epoch, get_delayed_activation_exit_epoch(get_current_epoch(state))) + exit_epochs += [get_delayed_activation_exit_epoch(get_current_epoch(state))] + exit_queue_epoch = sorted(exit_epochs)[-1] exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch]) if exit_queue_churn >= get_churn_limit(state): exit_queue_epoch += 1 From 3394368a6674241be42b7fa65cac4d41868a4908 Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 19:14:27 +1000 Subject: [PATCH 311/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 97abdbee7..78da35ea3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1320,8 +1320,7 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: # Compute exit queue epoch exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH] - exit_epochs += 
[get_delayed_activation_exit_epoch(get_current_epoch(state))] - exit_queue_epoch = sorted(exit_epochs)[-1] + exit_queue_epoch = sorted(exit_epochs + [get_delayed_activation_exit_epoch(get_current_epoch(state))])[-1] exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch]) if exit_queue_churn >= get_churn_limit(state): exit_queue_epoch += 1 From be86f966f87958856584b3f20c095abf910a3d0c Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Sun, 14 Apr 2019 19:18:00 +1000 Subject: [PATCH 312/481] fix transfer invariant, credits to @holiman for finding the edge case (#916) --- specs/core/0_beacon-chain.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f6427f1d4..df9ace80b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2416,6 +2416,8 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: get_balance(state, transfer.sender) == transfer.amount + transfer.fee or get_balance(state, transfer.sender) >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT ) + # No self-transfers (to enforce >= MIN_DEPOSIT_AMOUNT or zero balance invariant) + assert transfer.sender != transfer.recipient # A transfer is valid in only one slot assert state.slot == transfer.slot # Only withdrawn or not-yet-deposited accounts can transfer From 18d54fa1f82aba3270e3ada8133d7746f64893e0 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 14 Apr 2019 19:38:37 +1000 Subject: [PATCH 313/481] fix deposit contract placeholder address length --- configs/constant_presets/mainnet.yaml | 2 +- configs/constant_presets/minimal.yaml | 2 +- scripts/phase0/function_puller.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml index 27085d40a..d2d91d02c 100644 --- a/configs/constant_presets/mainnet.yaml +++ b/configs/constant_presets/mainnet.yaml @@ -22,7 +22,7 @@ SHUFFLE_ROUND_COUNT: 90 # Deposit contract # --------------------------------------------------------------- # **TBD** -DEPOSIT_CONTRACT_ADDRESS: 0x12345678901235678901234567890123567890 +DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 # 2**5 ` (= 32) DEPOSIT_CONTRACT_TREE_DEPTH: 32 diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index 8997bc5ed..c1d69bcd4 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -22,7 +22,7 @@ SHUFFLE_ROUND_COUNT: 10 # Deposit contract # --------------------------------------------------------------- # **TBD** -DEPOSIT_CONTRACT_ADDRESS: 0x12345678901235678901234567890123567890 +DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 # 2**5 ` (= 32) DEPOSIT_CONTRACT_TREE_DEPTH: 32 diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py index fc7f9fb8c..812498b2b 100644 --- a/scripts/phase0/function_puller.py +++ b/scripts/phase0/function_puller.py @@ -53,7 +53,7 @@ def get_spec(file_name) -> List[str]: if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789': eligible = False if eligible: - code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123567890123456789012357890'))) + code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890'))) # Build type-def re-initialization code_lines.append('') code_lines.append('def init_SSZ_types():') From 514d8f9232a58c8117a51c271405bf95dc6a4695 Mon 
Sep 17 00:00:00 2001 From: protolambda Date: Sun, 14 Apr 2019 20:17:22 +1000 Subject: [PATCH 314/481] Fix runner/handler references --- test_generators/bls/main.py | 6 ++++++ test_generators/operations/deposits.py | 6 ++++-- test_generators/shuffling/main.py | 2 ++ test_generators/ssz/main.py | 9 ++++++--- test_libs/gen_helpers/gen_base/gen_suite.py | 2 ++ 5 files changed, 20 insertions(+), 5 deletions(-) diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py index da6a79aae..ef80635de 100644 --- a/test_generators/bls/main.py +++ b/test_generators/bls/main.py @@ -166,6 +166,7 @@ def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOu forks_timeline="mainnet", forks=["phase0"], config="mainnet", + runner="bls", handler="msg_hash_uncompressed", test_cases=case01_message_hash_G2_uncompressed())) @@ -177,6 +178,7 @@ def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutp forks_timeline="mainnet", forks=["phase0"], config="mainnet", + runner="bls", handler="msg_hash_compressed", test_cases=case02_message_hash_G2_compressed())) @@ -189,6 +191,7 @@ def bls_priv_to_pub_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline="mainnet", forks=["phase0"], config="mainnet", + runner="bls", handler="priv_to_pub", test_cases=case03_private_to_public_key())) @@ -200,6 +203,7 @@ def bls_sign_msg_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline="mainnet", forks=["phase0"], config="mainnet", + runner="bls", handler="sign_msg", test_cases=case04_sign_messages())) @@ -211,6 +215,7 @@ def bls_aggregate_sigs_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline="mainnet", forks=["phase0"], config="mainnet", + runner="bls", handler="aggregate_sigs", test_cases=case06_aggregate_sigs())) @@ -222,6 +227,7 @@ def bls_aggregate_pubkeys_suite(configs_path: str) -> gen_typing.TestSuiteOutput forks_timeline="mainnet", forks=["phase0"], config="mainnet", + runner="bls", handler="aggregate_pubkeys", test_cases=case07_aggregate_pubkeys())) diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py index a72b1fbaa..e92731cbb 100644 --- a/test_generators/operations/deposits.py +++ b/test_generators/operations/deposits.py @@ -155,7 +155,8 @@ def mini_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline="testing", forks=["phase0"], config="minimal", - handler="core", + runner="operations", + handler="deposits", test_cases=deposit_cases())) @@ -169,5 +170,6 @@ def full_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline="mainnet", forks=["phase0"], config="mainnet", - handler="core", + runner="operations", + handler="deposits", test_cases=deposit_cases())) diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py index e8b2054a2..2c4faeb8f 100644 --- a/test_generators/shuffling/main.py +++ b/test_generators/shuffling/main.py @@ -30,6 +30,7 @@ def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline="testing", forks=["phase0"], config="minimal", + runner="shuffling", handler="core", test_cases=shuffling_test_cases())) @@ -44,6 +45,7 @@ def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline="mainnet", forks=["phase0"], config="mainnet", + runner="shuffling", handler="core", test_cases=shuffling_test_cases())) diff --git a/test_generators/ssz/main.py b/test_generators/ssz/main.py index 0e5d9d7c8..c1af4ce5f 100644 --- 
a/test_generators/ssz/main.py +++ b/test_generators/ssz/main.py @@ -14,7 +14,8 @@ def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline= "mainnet", forks=["phase0"], config="mainnet", - handler="core", + runner="ssz", + handler="uint", test_cases=generate_random_uint_test_cases())) def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: @@ -24,7 +25,8 @@ def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline= "mainnet", forks=["phase0"], config="mainnet", - handler="core", + runner="ssz", + handler="uint", test_cases=generate_uint_wrong_length_test_cases())) def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: @@ -34,7 +36,8 @@ def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: forks_timeline= "mainnet", forks=["phase0"], config="mainnet", - handler="core", + runner="ssz", + handler="uint", test_cases=generate_uint_bounds_test_cases() + generate_uint_out_of_bounds_test_cases())) diff --git a/test_libs/gen_helpers/gen_base/gen_suite.py b/test_libs/gen_helpers/gen_base/gen_suite.py index 3459d9ae3..a3f88791f 100644 --- a/test_libs/gen_helpers/gen_base/gen_suite.py +++ b/test_libs/gen_helpers/gen_base/gen_suite.py @@ -9,6 +9,7 @@ def render_suite(*, title: str, summary: str, forks_timeline: str, forks: Iterable[str], config: str, + runner: str, handler: str, test_cases: Iterable[TestCase]): yield "title", title @@ -16,5 +17,6 @@ def render_suite(*, yield "forks_timeline", forks_timeline, yield "forks", forks yield "config", config + yield "runner", runner yield "handler", handler yield "test_cases", test_cases From a376b6607fe5e6406371f44254960e891ee5ee8d Mon Sep 17 00:00:00 2001 From: Justin Date: Sun, 14 Apr 2019 21:53:32 +1000 Subject: [PATCH 315/481] Cleaner dust checking in transfers Inspired by [this](https://github.com/ethereum/eth2.0-specs/pull/916#commitcomment-33170877). 
--- specs/core/0_beacon-chain.md | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 649254f44..5a205e562 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2334,14 +2334,6 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: """ # Verify the amount and fee aren't individually too big (for anti-overflow purposes) assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) - # Verify that we have enough ETH to send, and that after the transfer the balance will be either - # exactly zero or at least MIN_DEPOSIT_AMOUNT - assert ( - get_balance(state, transfer.sender) == transfer.amount + transfer.fee or - get_balance(state, transfer.sender) >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT - ) - # No self-transfers (to enforce >= MIN_DEPOSIT_AMOUNT or zero balance invariant) - assert transfer.sender != transfer.recipient # A transfer is valid in only one slot assert state.slot == transfer.slot # Only withdrawn or not-yet-deposited accounts can transfer @@ -2365,6 +2357,9 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) increase_balance(state, transfer.recipient, transfer.amount) increase_balance(state, get_beacon_proposer_index(state, state.slot), transfer.fee) + # Verify balances are not dust + assert not (0 < get_balance(state, transfer.sender) < MIN_DEPOSIT_AMOUNT) + assert not (0 < get_balance(state, transfer.recipient) < MIN_DEPOSIT_AMOUNT) ``` #### State root verification From 3c8d1b23a52c8be12c01628d8546a4443a217ca7 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sun, 14 Apr 2019 22:12:39 +1000 Subject: [PATCH 316/481] Update specs/core/0_beacon-chain.md Co-Authored-By: djrtwo --- specs/core/0_beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3be5deda5..accade599 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -691,6 +691,7 @@ def slot_to_epoch(slot: Slot) -> Epoch: def get_previous_epoch(state: BeaconState) -> Epoch: """` Return the previous epoch of the given ``state``. + Return the current epoch if it's genesis epoch. """ current_epoch = get_current_epoch(state) return (current_epoch - 1) if current_epoch > GENESIS_EPOCH else current_epoch From 9bb902217d5ba66061e589ac22d41df66750eb4d Mon Sep 17 00:00:00 2001 From: protolambda Date: Sun, 14 Apr 2019 22:54:01 +1000 Subject: [PATCH 317/481] change wording deposit case format --- specs/test_formats/operations/deposits.md | 8 ++++---- test_generators/operations/deposits.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/specs/test_formats/operations/deposits.md b/specs/test_formats/operations/deposits.md index 5aaed24f7..2bce84e30 100644 --- a/specs/test_formats/operations/deposits.md +++ b/specs/test_formats/operations/deposits.md @@ -5,10 +5,10 @@ A deposit is a form of an operation (or "transaction"), modifying the state. ## Test case format ```yaml -case: string -- description of test case, purely for debugging purposes -pre: BeaconState -- state before applying the deposit -deposit: Deposit -- the deposit -post: BeaconState -- state after applying the deposit. No value if deposit processing is aborted. 
+description: string -- description of test case, purely for debugging purposes +pre: BeaconState -- state before applying the deposit +deposit: Deposit -- the deposit +post: BeaconState -- state after applying the deposit. No value if deposit processing is aborted. ``` ## Condition diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py index e92731cbb..bc2b84215 100644 --- a/test_generators/operations/deposits.py +++ b/test_generators/operations/deposits.py @@ -91,7 +91,7 @@ def valid_deposit(): new_dep, state, leaves = build_deposit_for_index(10, 10) state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) state.latest_eth1_data.deposit_count = len(leaves) - yield 'case', 'valid deposit to add new validator' + yield 'description', 'valid deposit to add new validator' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) spec.process_deposit(state, new_dep) @@ -103,7 +103,7 @@ def valid_topup(): new_dep, state, leaves = build_deposit_for_index(10, 3) state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) state.latest_eth1_data.deposit_count = len(leaves) - yield 'case', 'valid deposit to top-up existing validator' + yield 'description', 'valid deposit to top-up existing validator' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) spec.process_deposit(state, new_dep) @@ -118,7 +118,7 @@ def invalid_deposit_index(): # Mess up deposit index, 1 too small state.deposit_index = 9 - yield 'case', 'invalid deposit index' + yield 'description', 'invalid deposit index' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) yield 'post', None @@ -131,7 +131,7 @@ def invalid_deposit_proof(): # Make deposit proof invalid (at bottom of proof) new_dep.proof[-1] = spec.ZERO_HASH - yield 'case', 'invalid deposit proof' + yield 'description', 'invalid deposit proof' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) yield 'post', None From bcf10ecf11fcf137cb5753dbef5092502e1158b8 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Sun, 14 Apr 2019 22:55:38 +1000 Subject: [PATCH 318/481] Update 0_beacon-chain.md (#921) Fix typo to set the right property on the correct object --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 649254f44..d4548f05a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1937,7 +1937,7 @@ def process_balance_driven_status_transitions(state: BeaconState) -> None: for index, validator in enumerate(state.validator_registry): balance = get_balance(state, index) if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: - state.activation_eligibility_epoch = get_current_epoch(state) + validator.activation_eligibility_epoch = get_current_epoch(state) if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: initiate_validator_exit(state, index) From c01fb1eea69f64f9e33f23dacdd74c5bb61da343 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Sun, 14 Apr 2019 22:56:01 +1000 Subject: [PATCH 319/481] Update 0_beacon-chain.md (#922) More clean up on recent switch to exit queue --- specs/core/0_beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d4548f05a..aa3726294 100644 --- 
a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2271,7 +2271,6 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - initiated_exit=False, slashed=False, high_balance=0 ) From 40d6a2635af9603f96132dac9072ac35192a4857 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 15 Apr 2019 07:03:47 +1000 Subject: [PATCH 320/481] Update 0_beacon-chain.md (#923) Fix another typo w/ the withdrawal ~> exit queue PR --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4e697139e..2d150837e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1950,7 +1950,7 @@ Run the following function: ```python def update_registry(state: BeaconState) -> None: activation_queue = sorted([ - validator for validator in state.validator_registry if + index for index, validator in enumerate(state.validator_registry) if validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch) ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) From 9eba123e2e2e559c16b1b34335b7cd9b8ddc3c55 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 15 Apr 2019 07:54:08 +1000 Subject: [PATCH 321/481] Remove serialization from consensus Consensus now only cares about Merkleisation (i.e. `hash_tree_root`), not about serialization (i.e. `serialize`). This simplifies consensus code by a few tens of lines, is conceptually cleaner, and is more future proof. A corresponding change is required in the deposit contract. --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2d150837e..cb57c0a45 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2230,7 +2230,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Verify the Merkle branch merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization + leaf=hash_tree_root(deposit.data), proof=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH, index=deposit.index, From a25c436b78983b68ee40b40c79df3583b8c14159 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 15 Apr 2019 08:14:33 +1000 Subject: [PATCH 322/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index cb57c0a45..dbf399a8c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1360,7 +1360,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus ### Deposit arguments -The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`. +The deposit contract has a single `deposit` function which takes as argument the `DepositData` fields. 
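For context on the Merkleisation change above (the deposit leaf becomes `hash_tree_root(deposit.data)` rather than a hash of its serialization), the branch check it feeds into can be sketched as below. This is an illustrative sketch rather than a quote of the spec's `verify_merkle_branch`; `hash` is a stand-in for the spec's hash function, with sha256 used here only as a placeholder.

```python
from hashlib import sha256
from typing import List


def hash(data: bytes) -> bytes:
    # Placeholder for the spec's hash function
    return sha256(data).digest()


def verify_merkle_branch(leaf: bytes, proof: List[bytes], depth: int, index: int, root: bytes) -> bool:
    # Fold the leaf up the tree: at each level combine with the sibling supplied by
    # the proof, picking left/right concatenation from the matching bit of ``index``.
    value = leaf
    for i in range(depth):
        if (index >> i) % 2:
            value = hash(proof[i] + value)
        else:
            value = hash(value + proof[i])
    return value == root


# Per the diff above, the call site in process_deposit now passes:
# verify_merkle_branch(
#     leaf=hash_tree_root(deposit.data),
#     proof=deposit.proof,
#     depth=DEPOSIT_CONTRACT_TREE_DEPTH,
#     index=deposit.index,
#     root=state.latest_eth1_data.deposit_root,
# )
```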
### Withdrawal credentials From b6b82ae494edca81d92ad9ca5872c641f7fc3a7e Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 15 Apr 2019 08:15:20 +1000 Subject: [PATCH 323/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index dbf399a8c..dbc1c9b5b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1360,7 +1360,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus ### Deposit arguments -The deposit contract has a single `deposit` function which takes as argument the `DepositData` fields. +The deposit contract has a single `deposit` function which takes as argument the `DepositData` elements. ### Withdrawal credentials From 084919e06383d1959d15484368b5f67e1ae61792 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 15 Apr 2019 08:29:08 +1000 Subject: [PATCH 324/481] Adjust tests --- tests/phase0/helpers.py | 4 ++-- tests/phase0/test_sanity.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 8c8064fc1..e20bb9484 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -61,7 +61,7 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=N amount=spec.MAX_DEPOSIT_AMOUNT, proof_of_possession=proof_of_possession, ) - item = hash(deposit_data.serialize()) + item = deposit_data.hash_tree_root() deposit_data_leaves.append(item) tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) root = get_merkle_root((tuple(deposit_data_leaves))) @@ -180,7 +180,7 @@ def build_deposit(state, amount): deposit_data = build_deposit_data(state, pubkey, privkey, amount) - item = hash(deposit_data.serialize()) + item = deposit_data.hash_tree_root() index = len(deposit_data_leaves) deposit_data_leaves.append(item) tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) diff --git a/tests/phase0/test_sanity.py b/tests/phase0/test_sanity.py index 08c7610c0..2c13c2415 100644 --- a/tests/phase0/test_sanity.py +++ b/tests/phase0/test_sanity.py @@ -179,7 +179,7 @@ def test_deposit_in_block(state): privkey = privkeys[index] deposit_data = build_deposit_data(pre_state, pubkey, privkey, spec.MAX_DEPOSIT_AMOUNT) - item = hash(deposit_data.serialize()) + item = deposit_data.hash_tree_root() test_deposit_data_leaves.append(item) tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves)) root = get_merkle_root((tuple(test_deposit_data_leaves))) @@ -218,7 +218,7 @@ def test_deposit_top_up(state): deposit_data = build_deposit_data(pre_state, pubkey, privkey, amount) merkle_index = len(test_deposit_data_leaves) - item = hash(deposit_data.serialize()) + item = deposit_data.hash_tree_root() test_deposit_data_leaves.append(item) tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves)) root = get_merkle_root((tuple(test_deposit_data_leaves))) From 9591bea42c61e9619a845ce27511a1452630a9cd Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 15 Apr 2019 16:01:17 +1000 Subject: [PATCH 325/481] cleanup deposit tests --- test_generators/operations/deposits.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py index bc2b84215..b75025f0b 100644 --- a/test_generators/operations/deposits.py +++ b/test_generators/operations/deposits.py @@ -58,11 +58,12 @@ def build_deposit(state, 
index=index, data=deposit_data, ) + assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, get_merkle_root(tuple(deposit_data_leaves))) return deposit -def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[spec.Deposit, spec.BeaconState, List[spec.Bytes32]]: +def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[spec.Deposit, spec.BeaconState]: genesis_deposits = genesis.create_deposits( keys.pubkeys[:initial_validator_count], keys.withdrawal_creds[:initial_validator_count] @@ -83,14 +84,12 @@ def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[s state.latest_eth1_data.deposit_root = get_merkle_root(tuple(deposit_data_leaves)) state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - return deposit, state, deposit_data_leaves + return deposit, state @to_dict def valid_deposit(): - new_dep, state, leaves = build_deposit_for_index(10, 10) - state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) - state.latest_eth1_data.deposit_count = len(leaves) + new_dep, state = build_deposit_for_index(10, 10) yield 'description', 'valid deposit to add new validator' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) @@ -100,9 +99,7 @@ def valid_deposit(): @to_dict def valid_topup(): - new_dep, state, leaves = build_deposit_for_index(10, 3) - state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) - state.latest_eth1_data.deposit_count = len(leaves) + new_dep, state = build_deposit_for_index(10, 3) yield 'description', 'valid deposit to top-up existing validator' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) @@ -112,9 +109,7 @@ def valid_topup(): @to_dict def invalid_deposit_index(): - new_dep, state, leaves = build_deposit_for_index(10, 10) - state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) - state.latest_eth1_data.deposit_count = len(leaves) + new_dep, state = build_deposit_for_index(10, 10) # Mess up deposit index, 1 too small state.deposit_index = 9 @@ -125,9 +120,7 @@ def invalid_deposit_index(): @to_dict def invalid_deposit_proof(): - new_dep, state, leaves = build_deposit_for_index(10, 10) - state.latest_eth1_data.deposit_root = get_merkle_root(tuple(leaves)) - state.latest_eth1_data.deposit_count = len(leaves) + new_dep, state = build_deposit_for_index(10, 10) # Make deposit proof invalid (at bottom of proof) new_dep.proof[-1] = spec.ZERO_HASH From 25dd3b8d8e93f8a8f3dc6a06ef34fadce37b31c6 Mon Sep 17 00:00:00 2001 From: Dmitry S Date: Mon, 15 Apr 2019 22:29:25 +1000 Subject: [PATCH 326/481] Update test_generators/README.md Co-Authored-By: protolambda --- test_generators/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/README.md b/test_generators/README.md index bd509904c..1393af068 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -1,6 +1,6 @@ # Eth2.0 Test Generators -This directory of contains all the generators for YAML tests, consumed by Eth 2.0 client implementations. +This directory contains all the generators for YAML tests, consumed by Eth 2.0 client implementations. Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs, here: [ethereum/eth2.0-tests](https://github.com/ethereum/eth2.0-tests/). 
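For reference on the `assert spec.verify_merkle_branch(...)` added to `build_deposit` in the deposit-test cleanup (PATCH 325) above: the assertion checks the generated proof before a case is emitted. A self-contained sketch of what that helper computes, paraphrasing the phase 0 definition, with `sha256` standing in for the spec's `hash`:

```python
from hashlib import sha256
from typing import List

def hash(data: bytes) -> bytes:  # stand-in for the spec's hash function
    return sha256(data).digest()

def verify_merkle_branch(leaf: bytes, proof: List[bytes], depth: int, index: int, root: bytes) -> bool:
    # Fold the branch from the leaf up to the root; the i-th bit of `index`
    # says on which side the sibling `proof[i]` sits.
    value = leaf
    for i in range(depth):
        if index // (2**i) % 2:
            value = hash(proof[i] + value)
        else:
            value = hash(value + proof[i])
    return value == root
```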
From 0b2a03e2768a4f7ff30161ae5c3721e2abe5941a Mon Sep 17 00:00:00 2001 From: Dmitry S Date: Mon, 15 Apr 2019 22:30:02 +1000 Subject: [PATCH 327/481] Update test_generators/README.md Co-Authored-By: protolambda --- test_generators/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/README.md b/test_generators/README.md index 1393af068..8a34cb51e 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -132,7 +132,7 @@ if __name__ == "__main__": Recommendations: - you can have more than just 1 suite creator, e.g. ` gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])` -- you can concatenate lists of test cases, if you don't want to split it up in suites. +- you can concatenate lists of test cases, if you don't want to split it up in suites, however make sure they could be run with one handler. - you can split your suite creators into different python files/packages, good for code organization. - use config "minimal" for performance. But also implement a suite with the default config where necessary. - you may be able to write your test suite creator in a way where it does not make assumptions on constants. From d64a4f248eb38f407fcfb2654a7adceefb9a6f4f Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 15 Apr 2019 22:39:07 +1000 Subject: [PATCH 328/481] forks coverage description cleanup --- specs/test_formats/README.md | 24 +++++-------------- specs/test_formats/bls/aggregate_pubkeys.md | 4 ---- specs/test_formats/bls/aggregate_sigs.md | 4 ---- .../bls/msg_hash_g2_compressed.md | 4 ---- .../bls/msg_hash_g2_uncompressed.md | 4 ---- specs/test_formats/bls/priv_to_pub.md | 4 ---- specs/test_formats/bls/sign_msg.md | 4 ---- specs/test_formats/operations/deposits.md | 8 ------- specs/test_formats/shuffling/README.md | 4 ---- specs/test_formats/ssz/uint.md | 4 ---- 10 files changed, 6 insertions(+), 58 deletions(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 18c7329d9..6b9533056 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -75,22 +75,11 @@ There are two types of fork-data: The first is neat to have as a separate form: we prevent duplication, and can run with different presets (e.g. fork timeline for a minimal local test, for a public testnet, or for mainnet) -The second is still somewhat ambiguous: some tests may want cover multiple forks, and can do so in different ways: -- run one test, transitioning from one to the other -- run the same test for both -- run a test for every transition from one fork to the other -- more - -There is a common factor here however: the options are exclusive, and give a clear idea on what test suites need to be ran to cover testing for a specific fork. -The way this list of forks is interpreted, is up to the test-runner: -State-transition test suites may want to just declare forks that are being covered in the test suite, - whereas shuffling test suites may want to declare a list of forks to test the shuffling algorithm for individually. - -Test-formats specify the following `forks` interpretation rules: - -- `collective`: the test suite applies to all specified forks, and only needs to run once -- `individual`: the test suite should be ran against every fork -- more types may be specified with future test types. +The second does not affect the result of the tests, it just states what is covered by the tests, + so that the right suites can be executed to see coverage for a certain fork. 
+For some types of tests, it may be beneficial to ensure it runs exactly the same, with any given fork "active". +Test-formats can be explicit on the need to repeat a test with different forks being "active", + but generally tests run only once. ### Test completeness @@ -107,8 +96,7 @@ The aim is to provide clients with a well-defined scope of work to run a particu title: -- Display name for the test suite summary: -- Summarizes the test suite forks_timeline: -- Used to determine the forking timeline -forks: -- Runner decides what to do: run for each fork, or run for all at once, each fork transition, etc. - - ... +forks: -- Defines the coverage. Test-runner code may decide to re-run with the different forks "activated", when applicable. config: -- Used to determine which set of constants to run (possibly compile time) with runner: *MUST be consistent with folder structure* handler: *MUST be consistent with folder structure* diff --git a/specs/test_formats/bls/aggregate_pubkeys.md b/specs/test_formats/bls/aggregate_pubkeys.md index 9a6f1cc25..43c7d6c6d 100644 --- a/specs/test_formats/bls/aggregate_pubkeys.md +++ b/specs/test_formats/bls/aggregate_pubkeys.md @@ -15,7 +15,3 @@ output: BLS Pubkey -- expected output, single BLS pubkey ## Condition The `aggregate_pubkeys` handler should aggregate the keys in the `input`, and the result should match the expected `output`. - -## Forks - -Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/aggregate_sigs.md b/specs/test_formats/bls/aggregate_sigs.md index 1588e26cb..6690c3344 100644 --- a/specs/test_formats/bls/aggregate_sigs.md +++ b/specs/test_formats/bls/aggregate_sigs.md @@ -15,7 +15,3 @@ output: BLS Signature -- expected output, single BLS signature ## Condition The `aggregate_sigs` handler should aggregate the signatures in the `input`, and the result should match the expected `output`. - -## Forks - -Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/msg_hash_g2_compressed.md b/specs/test_formats/bls/msg_hash_g2_compressed.md index 51c64e28b..4e194e90b 100644 --- a/specs/test_formats/bls/msg_hash_g2_compressed.md +++ b/specs/test_formats/bls/msg_hash_g2_compressed.md @@ -17,7 +17,3 @@ All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with ` ## Condition The `msg_hash_g2_compressed` handler should hash the `message`, with the given `domain`, to G2 with compression, and the result should match the expected `output`. - -## Forks - -Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/msg_hash_g2_uncompressed.md b/specs/test_formats/bls/msg_hash_g2_uncompressed.md index b7d2caa02..f42ea9998 100644 --- a/specs/test_formats/bls/msg_hash_g2_uncompressed.md +++ b/specs/test_formats/bls/msg_hash_g2_uncompressed.md @@ -17,7 +17,3 @@ All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with ` ## Condition The `msg_hash_g2_uncompressed` handler should hash the `message`, with the given `domain`, to G2, without compression, and the result should match the expected `output`. 
- -## Forks - -Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/priv_to_pub.md b/specs/test_formats/bls/priv_to_pub.md index 9265b83ed..7af148d0f 100644 --- a/specs/test_formats/bls/priv_to_pub.md +++ b/specs/test_formats/bls/priv_to_pub.md @@ -15,7 +15,3 @@ All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with ` ## Condition The `priv_to_pub` handler should compute the public key for the given private key `input`, and the result should match the expected `output`. - -## Forks - -Forks-interpretation: `collective` diff --git a/specs/test_formats/bls/sign_msg.md b/specs/test_formats/bls/sign_msg.md index 3a6d63fa2..dd93174f2 100644 --- a/specs/test_formats/bls/sign_msg.md +++ b/specs/test_formats/bls/sign_msg.md @@ -18,7 +18,3 @@ All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with ` ## Condition The `sign_msg` handler should sign the given `message`, with `domain`, using the given `privkey`, and the result should match the expected `output`. - -## Forks - -Forks-interpretation: `collective` diff --git a/specs/test_formats/operations/deposits.md b/specs/test_formats/operations/deposits.md index 2bce84e30..b9dce318a 100644 --- a/specs/test_formats/operations/deposits.md +++ b/specs/test_formats/operations/deposits.md @@ -16,11 +16,3 @@ post: BeaconState -- state after applying the deposit. No value if deposit pr A `deposits` handler of the `operations` should process these cases, calling the implementation of the `process_deposit(state, deposit)` functionality described in the spec. The resulting state should match the expected `post` state, or no change if the `post` state is left blank. - -## Forks - -Forks-interpretation: `collective` - -Pre and post state contain slot numbers, and are time sensitive. -Additional tests will be added for future forks to cover fork-specific behavior based on input data - (including suites with deposits on fork transition blocks, covering multiple forks) diff --git a/specs/test_formats/shuffling/README.md b/specs/test_formats/shuffling/README.md index efc1b7b1a..514baf15a 100644 --- a/specs/test_formats/shuffling/README.md +++ b/specs/test_formats/shuffling/README.md @@ -30,7 +30,3 @@ Seed is the raw shuffling seed, passed to permute-index (or optimized shuffling The resulting list should match the expected output `shuffled` after shuffling the implied input, using the given `seed`. 
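One way a test runner might check a shuffling case against the pyspec, sketched here under the assumption that `shuffled[i]` is the permuted position of index `i` as produced by the spec's `get_permuted_index` (the "permute-index" approach named above); an optimized implementation could equally shuffle the whole `0..count` range at once:

```python
import eth2spec.phase0.spec as spec

def check_shuffling_case(case):
    seed = bytes.fromhex(case['seed'][2:])   # "0x"-prefixed bytes32
    count = case['count']
    assert [spec.get_permuted_index(i, count, seed) for i in range(count)] == case['shuffled']
```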
-## Forks - -Forks-interpretation: `collective` - diff --git a/specs/test_formats/ssz/uint.md b/specs/test_formats/ssz/uint.md index f71ddecb8..fd7cf3221 100644 --- a/specs/test_formats/ssz/uint.md +++ b/specs/test_formats/ssz/uint.md @@ -17,7 +17,3 @@ tags: List[string] -- description of test case, in the form of a list of labels Two-way testing can be implemented in the test-runner: - Encoding: After encoding the given input number `value`, the output should match `ssz` - Decoding: After decoding the given `ssz` bytes, it should match the input number `value` - -## Forks - -Forks-interpretation: `collective` From 87bee73222604a9abc40c3ead8f12c5c50f5e34c Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:05:42 +1000 Subject: [PATCH 329/481] Update specs/test_formats/operations/deposits.md Co-Authored-By: protolambda --- specs/test_formats/operations/deposits.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/operations/deposits.md b/specs/test_formats/operations/deposits.md index b9dce318a..8f44ebb22 100644 --- a/specs/test_formats/operations/deposits.md +++ b/specs/test_formats/operations/deposits.md @@ -15,4 +15,4 @@ post: BeaconState -- state after applying the deposit. No value if deposit pr A `deposits` handler of the `operations` should process these cases, calling the implementation of the `process_deposit(state, deposit)` functionality described in the spec. -The resulting state should match the expected `post` state, or no change if the `post` state is left blank. +The resulting state should match the expected `post` state, or if the `post` state is left blank, the handler should reject the inputs as invalid. From 956e7c5abc8e8b28025b8be46fb2ee41eb995bb8 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:06:01 +1000 Subject: [PATCH 330/481] Update specs/test_formats/shuffling/README.md Co-Authored-By: protolambda --- specs/test_formats/shuffling/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/shuffling/README.md b/specs/test_formats/shuffling/README.md index 514baf15a..57be96565 100644 --- a/specs/test_formats/shuffling/README.md +++ b/specs/test_formats/shuffling/README.md @@ -20,7 +20,7 @@ count: int shuffled: List[int] ``` -- The `bytes32` is encoded as strings, hexadecimal encoding, prefixed with `0x`. +- The `bytes32` is encoded a string, hexadecimal encoding, prefixed with `0x`. - Integers are validator indices. These are `uint64`, but realistically they are not as big. The `count` specifies the validator registry size. One should compute the shuffling for indices `0, 1, 2, 3, ..., count (exclusive)`. From 3b9d35f8e8ce5f8ca096a22dd8ae1f43f9881028 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:07:42 +1000 Subject: [PATCH 331/481] Update test_generators/README.md Co-Authored-By: protolambda --- test_generators/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/README.md b/test_generators/README.md index 8a34cb51e..743157aae 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -63,7 +63,7 @@ eth-utils==1.4.1 ../../test_libs/pyspec ``` The config helper and pyspec is optional, but preferred. We encourage generators to derive tests from the spec itself, to prevent code duplication and outdated tests. -Applying configurations to the spec is easy, and enables you to create test suites with different contexts. 
+Applying configurations to the spec is simple, and enables you to create test suites with different contexts. Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement. From 8a1aa275a3a197b21523f177830b1e74968d76ee Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:08:05 +1000 Subject: [PATCH 332/481] Update test_generators/operations/README.md Co-Authored-By: protolambda --- test_generators/operations/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/operations/README.md b/test_generators/operations/README.md index 9f1ecfddb..4e46c8dcb 100644 --- a/test_generators/operations/README.md +++ b/test_generators/operations/README.md @@ -3,7 +3,7 @@ Operations (or "transactions" in previous spec iterations), are atomic changes to the state, introduced by embedding in blocks. -This generators provides a series of test suites, divided into handler, for each operation type. +This generator provides a series of test suites, divided into handler, for each operation type. A operation test-runner can consume these operation test-suites, and handle different kinds of operations by processing the cases using the specified test handler. From 0400a888a69c0105a2a142b0912b54522636b2e7 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:08:27 +1000 Subject: [PATCH 333/481] Update test_generators/operations/README.md Co-Authored-By: protolambda --- test_generators/operations/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/operations/README.md b/test_generators/operations/README.md index 4e46c8dcb..e0b9d0e18 100644 --- a/test_generators/operations/README.md +++ b/test_generators/operations/README.md @@ -4,7 +4,7 @@ Operations (or "transactions" in previous spec iterations), are atomic changes to the state, introduced by embedding in blocks. This generator provides a series of test suites, divided into handler, for each operation type. -A operation test-runner can consume these operation test-suites, +An operation test-runner can consume these operation test-suites, and handle different kinds of operations by processing the cases using the specified test handler. Information on the format of the tests can be found in the [operations test formats documentation](../../specs/test_formats/operations/README.md). 
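A minimal sketch of the operation test-runner loop described above, for the `deposits` handler. It assumes `decode`/`encode` helpers mirroring the `encode(obj, type)` used by the generator, and cases following the `description`/`pre`/`deposit`/`post` format:

```python
import eth2spec.phase0.spec as spec

def run_deposit_case(case, decode, encode):
    state = decode(case['pre'], spec.BeaconState)
    deposit = decode(case['deposit'], spec.Deposit)
    try:
        spec.process_deposit(state, deposit)
    except AssertionError:
        # invalid input: the format leaves `post` blank and the handler must reject it
        assert case.get('post') is None
        return
    # valid input: the resulting state must match the expected `post` state
    assert encode(state, spec.BeaconState) == case['post']
```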
From 6542ae2c4ddcf5d07ced127d1717e7cf96beec63 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:08:49 +1000 Subject: [PATCH 334/481] Update test_generators/operations/deposits.py Co-Authored-By: protolambda --- test_generators/operations/deposits.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py index b75025f0b..3e47482a6 100644 --- a/test_generators/operations/deposits.py +++ b/test_generators/operations/deposits.py @@ -118,6 +118,7 @@ def invalid_deposit_index(): yield 'deposit', encode(new_dep, spec.Deposit) yield 'post', None + @to_dict def invalid_deposit_proof(): new_dep, state = build_deposit_for_index(10, 10) From 79c8f562bd10307ddb11bb275b34e3a60ae5750d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:09:05 +1000 Subject: [PATCH 335/481] Update test_generators/ssz/renderers.py Co-Authored-By: protolambda --- test_generators/ssz/renderers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/ssz/renderers.py b/test_generators/ssz/renderers.py index ee8a92838..28571cdda 100644 --- a/test_generators/ssz/renderers.py +++ b/test_generators/ssz/renderers.py @@ -77,7 +77,7 @@ def render_test_case(*, sedes, valid, value=None, serial=None, description=None, raise ValueError("For valid test cases, both value and ssz must be present") else: if value_and_serial_given: - raise ValueError("For invalid test cases, either value or ssz must not be present") + raise ValueError("For invalid test cases, one of either value or ssz must not be present") if tags is None: tags = [] From 943989c611b2b8353a0db735c6b321f6b68c6b3b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:31:10 +1000 Subject: [PATCH 336/481] Update test_generators/ssz/main.py Co-Authored-By: protolambda --- test_generators/ssz/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test_generators/ssz/main.py b/test_generators/ssz/main.py index c1af4ce5f..e1c67cd9e 100644 --- a/test_generators/ssz/main.py +++ b/test_generators/ssz/main.py @@ -18,6 +18,7 @@ def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: handler="uint", test_cases=generate_random_uint_test_cases())) + def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: return ("uint_wrong_length", "uint", gen_suite.render_suite( title="UInt Wrong Length", From 13317d3dfc84320ca2bfe76c5a2eb2f783c524ba Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:31:42 +1000 Subject: [PATCH 337/481] Update test_generators/ssz/uint_test_cases.py Co-Authored-By: protolambda --- test_generators/ssz/uint_test_cases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/ssz/uint_test_cases.py b/test_generators/ssz/uint_test_cases.py index 9ede16848..6d6492c9e 100644 --- a/test_generators/ssz/uint_test_cases.py +++ b/test_generators/ssz/uint_test_cases.py @@ -30,7 +30,7 @@ def generate_random_uint_test_cases(): sedes = UInt(bit_size) for _ in range(RANDOM_TEST_CASES_PER_BIT_SIZE): - value = random.randrange(0, 2 ** bit_size) + value = random.randrange(0, 2**bit_size) serial = ssz.encode(value, sedes) # note that we need to create the tags in each loop cycle, otherwise ruamel will use # YAML references which makes the resulting file harder to read From 5e902b448a4c7533b5de21fcd913b6fdc6d0fb1d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:32:51 +1000 Subject: [PATCH 338/481] Update test_libs/config_helpers/README.md 
Co-Authored-By: protolambda --- test_libs/config_helpers/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_libs/config_helpers/README.md b/test_libs/config_helpers/README.md index 184482082..eaa3f3b40 100644 --- a/test_libs/config_helpers/README.md +++ b/test_libs/config_helpers/README.md @@ -12,7 +12,7 @@ configs_path = 'configs/' import preset_loader from eth2spec.phase0 import spec -my_presets = preset_loader.load_presets(configs_path, 'main_net') +my_presets = preset_loader.load_presets(configs_path, 'mainnet') spec.apply_constants_preset(my_presets) ``` From dba7a1890b90f1351ed384180411b41433d18e81 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 15 Apr 2019 23:33:05 +1000 Subject: [PATCH 339/481] Update test_generators/ssz/main.py Co-Authored-By: protolambda --- test_generators/ssz/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test_generators/ssz/main.py b/test_generators/ssz/main.py index e1c67cd9e..1c09d51e7 100644 --- a/test_generators/ssz/main.py +++ b/test_generators/ssz/main.py @@ -30,6 +30,7 @@ def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: handler="uint", test_cases=generate_uint_wrong_length_test_cases())) + def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: return ("uint_bounds", "uint", gen_suite.render_suite( title="UInt Bounds", From c3d321ada80ce1bb6ae0f86205ca8ffd1b40f946 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 15 Apr 2019 23:37:13 +1000 Subject: [PATCH 340/481] fix argument typing for auxilary transition func --- test_libs/pyspec/eth2spec/phase0/state_transition.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test_libs/pyspec/eth2spec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py index d25cd3aba..9be192c1f 100644 --- a/test_libs/pyspec/eth2spec/phase0/state_transition.py +++ b/test_libs/pyspec/eth2spec/phase0/state_transition.py @@ -10,6 +10,7 @@ from typing import ( from .spec import ( BeaconState, BeaconBlock, + Slot ) @@ -98,7 +99,7 @@ def process_epoch_transition(state: BeaconState) -> None: spec.finish_epoch_update(state) -def state_transition_to(state: BeaconState, up_to: int) -> BeaconState: +def state_transition_to(state: BeaconState, up_to: Slot) -> BeaconState: while state.slot < up_to: spec.cache_state(state) if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: From d2b7a8f5de8265884c1c112ddc8bcf9ac7e6c8ea Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 15 Apr 2019 23:55:32 +1000 Subject: [PATCH 341/481] implement assertion sanity check suggested in PR --- test_generators/operations/deposits.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py index 3e47482a6..85c93f86b 100644 --- a/test_generators/operations/deposits.py +++ b/test_generators/operations/deposits.py @@ -116,7 +116,13 @@ def invalid_deposit_index(): yield 'description', 'invalid deposit index' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) - yield 'post', None + try: + spec.process_deposit(state, new_dep) + except AssertionError: + # expected + yield 'post', None + return + raise Exception('invalid_deposit_index has unexpectedly allowed deposit') @to_dict @@ -128,7 +134,13 @@ def invalid_deposit_proof(): yield 'description', 'invalid deposit proof' yield 'pre', encode(state, spec.BeaconState) yield 'deposit', encode(new_dep, spec.Deposit) - yield 'post', None + 
try: + spec.process_deposit(state, new_dep) + except AssertionError: + # expected + yield 'post', None + return + raise Exception('invalid_deposit_index has unexpectedly allowed deposit') @to_tuple From d8d0aaecff0ec231c790d3e236a173f58ee897c4 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 15 Apr 2019 11:02:23 -0700 Subject: [PATCH 342/481] Update simple-serialize.md --- specs/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index bc621da1a..804c66d70 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -1,4 +1,4 @@ -# SimpleSerialiZe (SSZ) +# SimpleSerialize (SSZ) This is a **work in progress** describing typing, serialization and Merkleization of Ethereum 2.0 objects. From 758696ba448a96cf68dace4c3f7cbb667c47b290 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 15 Apr 2019 11:03:53 -0700 Subject: [PATCH 343/481] Update sync_protocol.md --- specs/light_client/sync_protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 94ab8a2e4..af1e0d5b8 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -1,6 +1,6 @@ # Beacon Chain Light Client Syncing -__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. +__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendliness, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains. 
## Table of Contents From 110af997cd779953a8d379070f268c067a74385a Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 16 Apr 2019 13:32:28 +1000 Subject: [PATCH 344/481] Update scripts/phase0/function_puller.py Co-Authored-By: protolambda --- scripts/phase0/function_puller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py index 812498b2b..59e5b5e24 100644 --- a/scripts/phase0/function_puller.py +++ b/scripts/phase0/function_puller.py @@ -2,7 +2,7 @@ import sys from typing import List -def get_spec(file_name) -> List[str]: +def get_spec(file_name: str) -> List[str]: code_lines = [] pulling_from = None current_name = None From f78f3a62f0eed0d2cfebbd14a554214ec31a3607 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Tue, 16 Apr 2019 14:57:57 +1000 Subject: [PATCH 345/481] update constants to match latest dev (#933) --- configs/constant_presets/mainnet.yaml | 70 +++++++++++++-------------- configs/constant_presets/minimal.yaml | 54 ++++++++++----------- 2 files changed, 62 insertions(+), 62 deletions(-) diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml index d2d91d02c..e67cf79ce 100644 --- a/configs/constant_presets/mainnet.yaml +++ b/configs/constant_presets/mainnet.yaml @@ -5,16 +5,16 @@ # Misc # --------------------------------------------------------------- -# 2**10 ` (= 1,024) +# 2**10 (= 1,024) SHARD_COUNT: 1024 -# 2**7 ` (= 128) +# 2**7 (= 128) TARGET_COMMITTEE_SIZE: 128 -# 2**5 ` (= 32) -MAX_BALANCE_CHURN_QUOTIENT: 32 -# 2**12 ` (= 4,096) +# 2**12 (= 4,096) MAX_ATTESTATION_PARTICIPANTS: 4096 -# 2**2 ` (= 4) -MAX_EXIT_DEQUEUES_PER_EPOCH: 4 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 # See issue 563 SHUFFLE_ROUND_COUNT: 90 @@ -23,19 +23,19 @@ SHUFFLE_ROUND_COUNT: 90 # --------------------------------------------------------------- # **TBD** DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 -# 2**5 ` (= 32) +# 2**5 (= 32) DEPOSIT_CONTRACT_TREE_DEPTH: 32 # Gwei values # --------------------------------------------------------------- -# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +# 2**0 * 10**9 (= 1,000,000,000) Gwei MIN_DEPOSIT_AMOUNT: 1000000000 -# 2**5 * 10**9 ` (= 32,000,000,000) Gwei +# 2**5 * 10**9 (= 32,000,000,000) Gwei MAX_DEPOSIT_AMOUNT: 32000000000 -# 2**4 * 10**9 ` (= 16,000,000,000) Gwei +# 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +# 2**0 * 10**9 (= 1,000,000,000) Gwei HIGH_BALANCE_INCREMENT: 1000000000 @@ -54,63 +54,63 @@ BLS_WITHDRAWAL_PREFIX_BYTE: 0x00 # --------------------------------------------------------------- # 6 seconds 6 seconds SECONDS_PER_SLOT: 6 -# 2**2 ` (= 4) slots 24 seconds +# 2**2 (= 4) slots 24 seconds MIN_ATTESTATION_INCLUSION_DELAY: 4 -# 2**6 ` (= 64) slots 6.4 minutes +# 2**6 (= 64) slots 6.4 minutes SLOTS_PER_EPOCH: 64 -# 2**0 ` (= 1) epochs 6.4 minutes +# 2**0 (= 1) epochs 6.4 minutes MIN_SEED_LOOKAHEAD: 1 -# 2**2 ` (= 4) epochs 25.6 minutes +# 2**2 (= 4) epochs 25.6 minutes ACTIVATION_EXIT_DELAY: 4 -# 2**4 ` (= 16) epochs ~1.7 hours +# 2**4 (= 16) epochs ~1.7 hours EPOCHS_PER_ETH1_VOTING_PERIOD: 16 -# 2**13 ` (= 8,192) slots ~13 hours +# 2**13 (= 8,192) slots ~13 hours SLOTS_PER_HISTORICAL_ROOT: 8192 -# 2**8 ` (= 256) epochs ~27 hours +# 2**8 (= 256) epochs ~27 hours MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**11 ` (= 2,048) epochs 9 days +# 2**11 (= 2,048) epochs 9 days 
PERSISTENT_COMMITTEE_PERIOD: 2048 -# 2**6 ` (= 64) +# 2**6 (= 64) epochs ~7 hours MAX_CROSSLINK_EPOCHS: 64 # State list lengths # --------------------------------------------------------------- -# 2**13 ` (= 8,192) epochs ~36 days +# 2**13 (= 8,192) epochs ~36 days LATEST_RANDAO_MIXES_LENGTH: 8192 -# 2**13 ` (= 8,192) epochs ~36 days +# 2**13 (= 8,192) epochs ~36 days LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 8192 -# 2**13 ` (= 8,192) epochs ~36 days +# 2**13 (= 8,192) epochs ~36 days LATEST_SLASHED_EXIT_LENGTH: 8192 # Reward and penalty quotients # --------------------------------------------------------------- -# 2**5 ` (= 32) +# 2**5 (= 32) BASE_REWARD_QUOTIENT: 32 -# 2**9 ` (= 512) +# 2**9 (= 512) WHISTLEBLOWING_REWARD_QUOTIENT: 512 -# 2**3 ` (= 8) +# 2**3 (= 8) PROPOSER_REWARD_QUOTIENT: 8 -# 2**24 ` (= 16,777,216) +# 2**24 (= 16,777,216) INACTIVITY_PENALTY_QUOTIENT: 16777216 # Max operations per block # --------------------------------------------------------------- -# 2**5 ` (= 32) +# 2**5 (= 32) MIN_PENALTY_QUOTIENT: 32 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_PROPOSER_SLASHINGS: 16 -# 2**0 ` (= 1) +# 2**0 (= 1) MAX_ATTESTER_SLASHINGS: 1 -# 2**7 ` (= 128) +# 2**7 (= 128) MAX_ATTESTATIONS: 128 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_DEPOSITS: 16 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_TRANSFERS: 16 diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index c1d69bcd4..91ab7b358 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -9,13 +9,13 @@ SHARD_COUNT: 8 # [customized] unsecure, but fast TARGET_COMMITTEE_SIZE: 4 -# 2**5 ` (= 32) -MAX_BALANCE_CHURN_QUOTIENT: 32 -# 2**12 ` (= 4,096) +# 2**12 (= 4,096) MAX_ATTESTATION_PARTICIPANTS: 4096 -# 2**2 ` (= 4) -MAX_EXIT_DEQUEUES_PER_EPOCH: 4 -# See issue 563 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 65536 +# [customized] Faster, but unsecure. 
SHUFFLE_ROUND_COUNT: 10 @@ -23,19 +23,19 @@ SHUFFLE_ROUND_COUNT: 10 # --------------------------------------------------------------- # **TBD** DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 -# 2**5 ` (= 32) +# 2**5 (= 32) DEPOSIT_CONTRACT_TREE_DEPTH: 32 # Gwei values # --------------------------------------------------------------- -# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +# 2**0 * 10**9 (= 1,000,000,000) Gwei MIN_DEPOSIT_AMOUNT: 1000000000 -# 2**5 * 10**9 ` (= 32,000,000,000) Gwei +# 2**5 * 10**9 (= 32,000,000,000) Gwei MAX_DEPOSIT_AMOUNT: 32000000000 -# 2**4 * 10**9 ` (= 16,000,000,000) Gwei +# 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**0 * 10**9 ` (= 1,000,000,000) Gwei +# 2**0 * 10**9 (= 1,000,000,000) Gwei HIGH_BALANCE_INCREMENT: 1000000000 @@ -58,19 +58,19 @@ SECONDS_PER_SLOT: 6 MIN_ATTESTATION_INCLUSION_DELAY: 2 # [customized] fast epochs SLOTS_PER_EPOCH: 8 -# 2**0 ` (= 1) epochs 6.4 minutes +# 2**0 (= 1) epochs 6.4 minutes MIN_SEED_LOOKAHEAD: 1 -# 2**2 ` (= 4) epochs 25.6 minutes +# 2**2 (= 4) epochs 25.6 minutes ACTIVATION_EXIT_DELAY: 4 # [customized] higher frequency new deposits from eth1 for testing EPOCHS_PER_ETH1_VOTING_PERIOD: 2 # [customized] smaller state SLOTS_PER_HISTORICAL_ROOT: 64 -# 2**8 ` (= 256) epochs ~27 hours +# 2**8 (= 256) epochs ~27 hours MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**11 ` (= 2,048) epochs 9 days +# 2**11 (= 2,048) epochs 9 days PERSISTENT_COMMITTEE_PERIOD: 2048 -# 2**6 ` (= 64) +# 2**6 (= 64) epochs ~7 hours MAX_CROSSLINK_EPOCHS: 64 @@ -86,31 +86,31 @@ LATEST_SLASHED_EXIT_LENGTH: 64 # Reward and penalty quotients # --------------------------------------------------------------- -# 2**5 ` (= 32) +# 2**5 (= 32) BASE_REWARD_QUOTIENT: 32 -# 2**9 ` (= 512) +# 2**9 (= 512) WHISTLEBLOWING_REWARD_QUOTIENT: 512 -# 2**3 ` (= 8) +# 2**3 (= 8) PROPOSER_REWARD_QUOTIENT: 8 -# 2**24 ` (= 16,777,216) +# 2**24 (= 16,777,216) INACTIVITY_PENALTY_QUOTIENT: 16777216 # Max operations per block # --------------------------------------------------------------- -# 2**5 ` (= 32) +# 2**5 (= 32) MIN_PENALTY_QUOTIENT: 32 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_PROPOSER_SLASHINGS: 16 -# 2**0 ` (= 1) +# 2**0 (= 1) MAX_ATTESTER_SLASHINGS: 1 -# 2**7 ` (= 128) +# 2**7 (= 128) MAX_ATTESTATIONS: 128 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_DEPOSITS: 16 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 -# 2**4 ` (= 16) +# 2**4 (= 16) MAX_TRANSFERS: 16 From f84818f19cfc18bb9b99d51ad238a2204ecfdab9 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 16 Apr 2019 14:59:35 +1000 Subject: [PATCH 346/481] Decouple justification and finalization processing (#925) --- specs/core/0_beacon-chain.md | 57 ++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 31 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2d150837e..a187efbb5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1726,47 +1726,42 @@ Run the following function: ```python def update_justification_and_finalization(state: BeaconState) -> None: - new_justified_epoch = state.current_justified_epoch - new_finalized_epoch = state.finalized_epoch + antepenultimate_justified_epoch = state.previous_justified_epoch - # Rotate the justification bitfield up one epoch to make room for the current epoch (and limit to 64 bits) + # Process justifications + state.previous_justified_epoch = state.current_justified_epoch + state.previous_justified_root = state.current_justified_root state.justification_bitfield 
= (state.justification_bitfield << 1) % 2**64 - # If the previous epoch gets justified, fill the second last bit previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - 1 - state.justification_bitfield |= 2 - # If the current epoch gets justified, fill the last bit + state.current_justified_epoch = get_previous_epoch(state) + state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) + state.justification_bitfield |= (1 << 1) current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - state.justification_bitfield |= 1 + state.current_justified_epoch = get_current_epoch(state) + state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) + state.justification_bitfield |= (1 << 0) # Process finalizations bitfield = state.justification_bitfield current_epoch = get_current_epoch(state) - # The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source - if (bitfield >> 1) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 3: - new_finalized_epoch = state.previous_justified_epoch - # The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source - if (bitfield >> 1) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 2: - new_finalized_epoch = state.previous_justified_epoch - # The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source - if (bitfield >> 0) % 8 == 0b111 and state.current_justified_epoch == current_epoch - 2: - new_finalized_epoch = state.current_justified_epoch - # The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source - if (bitfield >> 0) % 4 == 0b11 and state.current_justified_epoch == current_epoch - 1: - new_finalized_epoch = state.current_justified_epoch - - # Update state jusification/finality fields - state.previous_justified_epoch = state.current_justified_epoch - state.previous_justified_root = state.current_justified_root - if new_justified_epoch != state.current_justified_epoch: - state.current_justified_epoch = new_justified_epoch - state.current_justified_root = get_block_root(state, get_epoch_start_slot(new_justified_epoch)) - if new_finalized_epoch != state.finalized_epoch: - state.finalized_epoch = new_finalized_epoch - state.finalized_root = get_block_root(state, get_epoch_start_slot(new_finalized_epoch)) + # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source + if (bitfield >> 1) % 8 == 0b111 and antepenultimate_justified_epoch == current_epoch - 3: + state.finalized_epoch = antepenultimate_justified_epoch + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source + if (bitfield >> 1) % 4 == 0b11 and antepenultimate_justified_epoch == current_epoch - 2: + state.finalized_epoch = antepenultimate_justified_epoch + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source + if (bitfield >> 0) % 8 == 0b111 and 
state.previous_justified_root == current_epoch - 2: + state.finalized_epoch = state.previous_justified_root + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source + if (bitfield >> 0) % 4 == 0b11 and state.previous_justified_root == current_epoch - 1: + state.finalized_epoch = state.previous_justified_root + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) ``` #### Crosslinks From ed28515a9570bfb69f440e93488691d36df2e0e3 Mon Sep 17 00:00:00 2001 From: Carl Beekhuizen Date: Tue, 16 Apr 2019 16:16:13 +1000 Subject: [PATCH 347/481] Enables transferes of BAL > 32 ETH --- .../block_processing/test_process_deposit.py | 10 +++++----- py_tests/phase0/helpers.py | 2 +- py_tests/phase0/test_sanity.py | 6 +++--- specs/core/0_beacon-chain.md | 19 ++++++++++--------- 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/py_tests/phase0/block_processing/test_process_deposit.py b/py_tests/phase0/block_processing/test_process_deposit.py index cd682a4d4..31862c75d 100644 --- a/py_tests/phase0/block_processing/test_process_deposit.py +++ b/py_tests/phase0/block_processing/test_process_deposit.py @@ -32,7 +32,7 @@ def test_success(state): deposit_data_leaves, pubkey, privkey, - spec.MAX_DEPOSIT_AMOUNT, + spec.MAX_EFFECTIVE_BALANCE, ) pre_state.latest_eth1_data.deposit_root = root @@ -45,7 +45,7 @@ def test_success(state): assert len(post_state.validator_registry) == len(state.validator_registry) + 1 assert len(post_state.balances) == len(state.balances) + 1 assert post_state.validator_registry[index].pubkey == pubkeys[index] - assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT + assert get_balance(post_state, index) == spec.MAX_EFFECTIVE_BALANCE assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count return pre_state, deposit, post_state @@ -56,7 +56,7 @@ def test_success_top_up(state): deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) validator_index = 0 - amount = spec.MAX_DEPOSIT_AMOUNT // 4 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 pubkey = pubkeys[validator_index] privkey = privkeys[validator_index] deposit, root, deposit_data_leaves = build_deposit( @@ -95,7 +95,7 @@ def test_wrong_index(state): deposit_data_leaves, pubkey, privkey, - spec.MAX_DEPOSIT_AMOUNT, + spec.MAX_EFFECTIVE_BALANCE, ) # mess up deposit_index @@ -124,7 +124,7 @@ def test_bad_merkle_proof(state): deposit_data_leaves, pubkey, privkey, - spec.MAX_DEPOSIT_AMOUNT, + spec.MAX_EFFECTIVE_BALANCE, ) # mess up merkle branch diff --git a/py_tests/phase0/helpers.py b/py_tests/phase0/helpers.py index b28f480b2..cef4f4827 100644 --- a/py_tests/phase0/helpers.py +++ b/py_tests/phase0/helpers.py @@ -58,7 +58,7 @@ def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=N pubkey=pubkey, # insecurely use pubkey as withdrawal key as well withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], - amount=spec.MAX_DEPOSIT_AMOUNT, + amount=spec.MAX_EFFECTIVE_BALANCE, proof_of_possession=proof_of_possession, ) item = hash(deposit_data.serialize()) diff --git a/py_tests/phase0/test_sanity.py b/py_tests/phase0/test_sanity.py index 95bf9089c..cc987002e 100644 --- a/py_tests/phase0/test_sanity.py +++ b/py_tests/phase0/test_sanity.py @@ -177,7 +177,7 @@ def test_deposit_in_block(state): index = len(test_deposit_data_leaves) pubkey = pubkeys[index] privkey = privkeys[index] - deposit_data = 
build_deposit_data(pre_state, pubkey, privkey, spec.MAX_DEPOSIT_AMOUNT) + deposit_data = build_deposit_data(pre_state, pubkey, privkey, spec.MAX_EFFECTIVE_BALANCE) item = hash(deposit_data.serialize()) test_deposit_data_leaves.append(item) @@ -201,7 +201,7 @@ def test_deposit_in_block(state): state_transition(post_state, block) assert len(post_state.validator_registry) == len(state.validator_registry) + 1 assert len(post_state.balances) == len(state.balances) + 1 - assert get_balance(post_state, index) == spec.MAX_DEPOSIT_AMOUNT + assert get_balance(post_state, index) == spec.MAX_EFFECTIVE_BALANCE assert post_state.validator_registry[index].pubkey == pubkeys[index] return pre_state, [block], post_state @@ -212,7 +212,7 @@ def test_deposit_top_up(state): test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) validator_index = 0 - amount = spec.MAX_DEPOSIT_AMOUNT // 4 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 pubkey = pubkeys[validator_index] privkey = privkeys[validator_index] deposit_data = build_deposit_data(pre_state, pubkey, privkey, amount) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2d150837e..2e84b958f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -201,7 +201,7 @@ These configurations are updated for releases, but may be out of sync during `de | Name | Value | Unit | | - | - | :-: | | `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | -| `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | +| `MAX_EFFECTIVE_BALANCE` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | | `HIGH_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | @@ -1002,7 +1002,7 @@ def get_beacon_proposer_index(state: BeaconState, int_to_bytes8(i // 32) )[i % 32] candidate = first_committee[(current_epoch + i) % len(first_committee)] - if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: + if get_effective_balance(state, candidate) * 256 > MAX_EFFECTIVE_BALANCE * rand_byte: return candidate i += 1 ``` @@ -1081,7 +1081,7 @@ def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: """ Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. """ - return min(get_balance(state, index), MAX_DEPOSIT_AMOUNT) + return min(get_balance(state, index), MAX_EFFECTIVE_BALANCE) ``` ### `get_total_balance` @@ -1373,7 +1373,7 @@ The private key corresponding to `withdrawal_pubkey` will be required to initiat ### `Deposit` logs -Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract. +Every Ethereum 1.0 deposit, of size greater than `MIN_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract. 
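To make the `MAX_DEPOSIT_AMOUNT` to `MAX_EFFECTIVE_BALANCE` rename in this patch concrete: the balance "at stake" is simply the actual balance clipped to that constant, so a deposit above 32 ETH is accepted but the excess does not count. Illustrative Gwei arithmetic only (not part of the patch):

```python
MAX_EFFECTIVE_BALANCE = 32 * 10**9          # 32 ETH in Gwei

def effective_balance(balance: int) -> int:
    # mirrors get_effective_balance: min(actual balance, MAX_EFFECTIVE_BALANCE)
    return min(balance, MAX_EFFECTIVE_BALANCE)

assert effective_balance(40 * 10**9) == 32 * 10**9   # the 8 ETH excess is not at stake
assert effective_balance(17 * 10**9) == 17 * 10**9
```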
### `Eth2Genesis` log @@ -1395,7 +1395,7 @@ For convenience, we provide the interface to the contract here: * `__init__()`: initializes the contract * `get_deposit_root() -> bytes32`: returns the current root of the deposit tree -* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei. +* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be greater than `MIN_DEPOSIT_AMOUNT` inclusive. Each of these constants are specified in units of Gwei. ## On genesis @@ -1495,7 +1495,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Process genesis activations for validator_index in range(len(state.validator_registry)): - if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: + if get_effective_balance(state, validator_index) >= MAX_EFFECTIVE_BALANCE: activate_validator(state, validator_index, is_genesis=True) genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH)) @@ -1936,7 +1936,7 @@ def process_balance_driven_status_transitions(state: BeaconState) -> None: """ for index, validator in enumerate(state.validator_registry): balance = get_balance(state, index) - if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: + if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_EFFECTIVE_BALANCE: validator.activation_eligibility_epoch = get_current_epoch(state) if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: @@ -2335,10 +2335,11 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) # A transfer is valid in only one slot assert state.slot == transfer.slot - # Only withdrawn or not-yet-deposited accounts can transfer + # Only withdrawn, not-yet-deposited accounts, or the balance over MAX_EFFECTIVE_BALANCE can be transfered assert ( get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or - state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH + state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH or + transfer.amount + transfer.fee >= get_balance(statetransfer.sender) - get_effective_balance(transfer.sender) ) # Verify that the pubkey is valid assert ( From ae0afe389fc52ed5554b80fdae7960fd61a99299 Mon Sep 17 00:00:00 2001 From: Carl Beekhuizen Date: Tue, 16 Apr 2019 20:11:51 +1000 Subject: [PATCH 348/481] Cleaner assertion --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 2e84b958f..974131027 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2339,7 +2339,7 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: assert ( get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH or - transfer.amount + transfer.fee >= get_balance(statetransfer.sender) - 
get_effective_balance(transfer.sender) + transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= get_balance(transfer.sender) ) # Verify that the pubkey is valid assert ( From eeedea2d8c560329714270a885bb48bda35067c8 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 16 Apr 2019 12:03:22 -0500 Subject: [PATCH 349/481] Update 1_shard-data-chains.md --- specs/core/1_shard-data-chains.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 6a2094688..1e1a232fe 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -406,4 +406,4 @@ def is_valid_beacon_attestation(shard: Shard, ## Shard fork choice rule -The fork choice rule for any shard is LMD GHOST using the shard attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than a slot.) +The fork choice rule for any shard is LMD GHOST using the shard attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.) 
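A worked example of the transfer rule finalized in the "Cleaner assertion" patch above: an active, not-yet-withdrawable sender may only move the portion of its balance above `MAX_EFFECTIVE_BALANCE`, i.e. `amount + fee + MAX_EFFECTIVE_BALANCE <= balance`. Illustrative Gwei values only:

```python
MAX_EFFECTIVE_BALANCE = 32 * 10**9               # 32 ETH in Gwei
sender_balance = 40 * 10**9                      # 40 ETH on the books

amount, fee = 7 * 10**9, 1 * 10**9               # exactly the 8 ETH excess
assert amount + fee + MAX_EFFECTIVE_BALANCE <= sender_balance          # accepted

amount = 9 * 10**9                               # would dip below 32 ETH
assert not (amount + fee + MAX_EFFECTIVE_BALANCE <= sender_balance)    # rejected
```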
From 90a6199389ef0f64ce96750dda5b3c5e34932c14 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 16 Apr 2019 12:04:29 -0500 Subject: [PATCH 350/481] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 12536070d..d1aaab74e 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,6 @@ The following are the broad design goals for Ethereum 2.0: Documentation on the different components used during spec writing can be found here: * [YAML Test Generators](test_generators/README.md) -* [Executable Python Spec](test_libs/eth2spec/README.md) +* [Executable Python Spec](test_libs/pyspec/README.md) * [Py-tests](py_tests/README.md) From 24492aa36f51d2a3ee9fa07a057be5454c3be656 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 17 Apr 2019 10:16:01 +1000 Subject: [PATCH 351/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0104a16ae..dc359b056 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -379,7 +379,7 @@ The types are defined topologically to aid in facilitating an executable version # Attestation data 'data': AttestationData, # Aggregate signature - 'aggregate_signature': 'bytes96', + 'signature': 'bytes96', } ``` @@ -495,7 +495,7 @@ The types are defined topologically to aid in facilitating an executable version # Custody bitfield 'custody_bitfield': 'bytes', # BLS aggregate signature - 'aggregate_signature': 'bytes96', + 'signature': 'bytes96', } ``` @@ -1159,7 +1159,7 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation): custody_bit_0_indices=custody_bit_0_indices, custody_bit_1_indices=custody_bit_1_indices, data=attestation.data, - aggregate_signature=attestation.aggregate_signature + signature=attestation.signature ) ``` @@ -1198,7 +1198,7 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b0)), hash_tree_root(AttestationDataAndCustodyBit(data=indexed_attestation.data, custody_bit=0b1)), ], - signature=indexed_attestation.aggregate_signature, + signature=indexed_attestation.signature, domain=get_domain(state, DOMAIN_ATTESTATION, slot_to_epoch(indexed_attestation.data.slot)), ) ``` From c82acc6970ba59abe0ddaa59c6e8f40dac9d8ea8 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Wed, 17 Apr 2019 11:49:49 +1000 Subject: [PATCH 352/481] change ci to not trigger gen building, since it is not committed anyway. 
See issue #928 (#947) --- .circleci/config.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d40fd467f..5be6ed500 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,13 +15,13 @@ jobs: name: Run py-tests command: make test - - run: - name: Generate YAML tests - command: make gen_yaml_tests - -# TODO in future PR (after #851): decide on CI triggering of yaml tests building, +# TODO see #928: decide on CI triggering of yaml tests building, # and destination of output (new yaml tests LFS-configured repository) # +# - run: +# name: Generate YAML tests +# command: make gen_yaml_tests +# # - store_artifacts: # path: test-reports # destination: test-reports From 882937b5375fe79e0623bc6464b83fa6a7a79463 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 17 Apr 2019 12:32:50 +1000 Subject: [PATCH 353/481] attesation proposer rewards to block processing (#920) --- .../test_process_attester_slashing.py | 2 +- .../test_process_block_header.py | 7 +- py_tests/phase0/helpers.py | 8 ++ py_tests/phase0/test_sanity.py | 5 +- specs/core/0_beacon-chain.md | 112 +++++++----------- specs/core/1_custody-game.md | 6 +- specs/validator/0_beacon-chain-validator.md | 11 +- .../eth2spec/phase0/state_transition.py | 5 +- 8 files changed, 73 insertions(+), 83 deletions(-) diff --git a/py_tests/phase0/block_processing/test_process_attester_slashing.py b/py_tests/phase0/block_processing/test_process_attester_slashing.py index 4008e38a2..8db71deb9 100644 --- a/py_tests/phase0/block_processing/test_process_attester_slashing.py +++ b/py_tests/phase0/block_processing/test_process_attester_slashing.py @@ -39,7 +39,7 @@ def run_attester_slashing_processing(state, attester_slashing, valid=True): get_balance(post_state, slashed_index) < get_balance(state, slashed_index) ) - proposer_index = get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) # gained whistleblower reward assert ( get_balance(post_state, proposer_index) > diff --git a/py_tests/phase0/block_processing/test_process_block_header.py b/py_tests/phase0/block_processing/test_process_block_header.py index a02cca656..3b99f2ad4 100644 --- a/py_tests/phase0/block_processing/test_process_block_header.py +++ b/py_tests/phase0/block_processing/test_process_block_header.py @@ -10,6 +10,7 @@ from eth2spec.phase0.spec import ( ) from phase0.helpers import ( build_empty_block_for_next_slot, + next_slot, ) # mark entire file as 'header' @@ -61,8 +62,12 @@ def test_invalid_previous_block_root(state): def test_proposer_slashed(state): + # use stub state to get proposer index of next slot + stub_state = deepcopy(state) + next_slot(stub_state) + proposer_index = get_beacon_proposer_index(stub_state) + # set proposer to slashed - proposer_index = get_beacon_proposer_index(state, state.slot + 1) state.validator_registry[proposer_index].slashed = True block = build_empty_block_for_next_slot(state) diff --git a/py_tests/phase0/helpers.py b/py_tests/phase0/helpers.py index b28f480b2..044386696 100644 --- a/py_tests/phase0/helpers.py +++ b/py_tests/phase0/helpers.py @@ -2,6 +2,9 @@ from copy import deepcopy from py_ecc import bls +from eth2spec.phase0.state_transition import ( + state_transition, +) import eth2spec.phase0.spec as spec from eth2spec.utils.minimal_ssz import signing_root from eth2spec.phase0.spec import ( @@ -303,3 +306,8 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) 
domain_type=spec.DOMAIN_ATTESTATION, ) ) + + +def next_slot(state): + block = build_empty_block_for_next_slot(state) + state_transition(state, block) diff --git a/py_tests/phase0/test_sanity.py b/py_tests/phase0/test_sanity.py index 95bf9089c..04af507d7 100644 --- a/py_tests/phase0/test_sanity.py +++ b/py_tests/phase0/test_sanity.py @@ -160,7 +160,7 @@ def test_attester_slashing(state): # lost whistleblower reward assert get_balance(test_state, validator_index) < get_balance(state, validator_index) - proposer_index = get_beacon_proposer_index(test_state, test_state.slot) + proposer_index = get_beacon_proposer_index(test_state) # gained whistleblower reward assert ( get_balance(test_state, proposer_index) > @@ -260,6 +260,9 @@ def test_attestation(state): assert len(test_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1 + proposer_index = get_beacon_proposer_index(test_state) + assert test_state.balances[proposer_index] > state.balances[proposer_index] + # # Epoch transition should move to previous_epoch_attestations # diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a187efbb5..5adafbe2e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -984,25 +984,17 @@ def generate_seed(state: BeaconState, ### `get_beacon_proposer_index` ```python -def get_beacon_proposer_index(state: BeaconState, - slot: Slot) -> ValidatorIndex: +def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: """ - Return the beacon proposer index for the ``slot``. - Due to proposer selection being based upon the validator balances during - the epoch in question, this can only be run for the current epoch. + Return the beacon proposer index at ``state.slot``. """ current_epoch = get_current_epoch(state) - assert slot_to_epoch(slot) == current_epoch - - first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0] + first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0] i = 0 while True: - rand_byte = hash( - generate_seed(state, current_epoch) + - int_to_bytes8(i // 32) - )[i % 32] candidate = first_committee[(current_epoch + i) % len(first_committee)] - if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: + random_byte = hash(generate_seed(state, current_epoch) + int_to_bytes8(i // 32))[i % 32] + if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * random_byte: return candidate i += 1 ``` @@ -1051,16 +1043,8 @@ def get_attestation_participants(state: BeaconState, Return the sorted participant indices corresponding to ``attestation_data`` and ``bitfield``. """ crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) - assert verify_bitfield(bitfield, len(crosslink_committee)) - - # Find the participating attesters in the committee - participants = [] - for i, validator_index in enumerate(crosslink_committee): - aggregation_bit = get_bitfield_bit(bitfield, i) - if aggregation_bit == 0b1: - participants.append(validator_index) - return sorted(participants) + return sorted([index for i, index in enumerate(crosslink_committee) if get_bitfield_bit(bitfield, i) == 0b1]) ``` ### `int_to_bytes1`, `int_to_bytes2`, ... 
@@ -1344,7 +1328,7 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl slashed_balance = get_effective_balance(state, slashed_index) state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance - proposer_index = get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: whistleblower_index = proposer_index whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT @@ -1494,9 +1478,9 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], process_deposit(state, deposit) # Process genesis activations - for validator_index in range(len(state.validator_registry)): - if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: - activate_validator(state, validator_index, is_genesis=True) + for index in range(len(state.validator_registry)): + if get_effective_balance(state, index) >= MAX_DEPOSIT_AMOUNT: + activate_validator(state, index, is_genesis=True) genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): @@ -1547,8 +1531,8 @@ def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: return get_ancestor(store, store.get_parent(block), slot) ``` -* Let `get_latest_attestation(store: Store, validator_index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `validator_index`. If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first. -* Let `get_latest_attestation_target(store: Store, validator_index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, validator_index)`. +* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first. +* Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`. * Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` returns the child blocks of the given `block`. * Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`. * The `head` is `lmd_ghost(store, justified_head_state, justified_head)` where the function `lmd_ghost` is defined below. Note that the implementation below is suboptimal; there are implementations that compute the head in time logarithmic in slot count. @@ -1560,10 +1544,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) """ validators = start_state.validator_registry active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) - attestation_targets = [ - (validator_index, get_latest_attestation_target(store, validator_index)) - for validator_index in active_validator_indices - ] + attestation_targets = [(i, get_latest_attestation_target(store, i)) for i in active_validator_indices] # Use the rounded-balance-with-hysteresis supplied by the protocol for fork # choice voting. 
This reduces the number of recomputations that need to be @@ -1628,7 +1609,7 @@ The steps below happen when `state.slot > GENESIS_SLOT and (state.slot + 1) % SL #### Helper functions -We define some helper functions utilized when processing an epoch transition: +We define epoch transition helper functions: ```python def get_current_total_balance(state: BeaconState) -> Gwei: @@ -1702,24 +1683,12 @@ def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple ``` ```python -def earliest_attestation(state: BeaconState, validator_index: ValidatorIndex) -> PendingAttestation: +def earliest_attestation(state: BeaconState, attestations: List[PendingAttestation], index: ValidatorIndex) -> PendingAttestation: return min([ - a for a in state.previous_epoch_attestations if - validator_index in get_attestation_participants(state, a.data, a.aggregation_bitfield) + a for a in attestations if index in get_attestation_participants(state, a.data, a.aggregation_bitfield) ], key=lambda a: a.inclusion_slot) ``` -```python -def inclusion_slot(state: BeaconState, validator_index: ValidatorIndex) -> Slot: - return earliest_attestation(state, validator_index).inclusion_slot -``` - -```python -def inclusion_distance(state: BeaconState, validator_index: ValidatorIndex) -> int: - attestation = earliest_attestation(state, validator_index) - return attestation.inclusion_slot - attestation.data.slot -``` - #### Justification Run the following function: @@ -1805,14 +1774,19 @@ def maybe_reset_eth1_period(state: BeaconState) -> None: First, we define some additional helpers: ```python -def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: - if get_previous_total_balance(state) == 0: +def get_base_reward_from_total_balance(state: BeaconState, total_balance: Gwei, index: ValidatorIndex) -> Gwei: + if total_balance == 0: return 0 - adjusted_quotient = integer_squareroot(get_previous_total_balance(state)) // BASE_REWARD_QUOTIENT + adjusted_quotient = integer_squareroot(total_balance) // BASE_REWARD_QUOTIENT return get_effective_balance(state, index) // adjusted_quotient // 5 ``` +```python +def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: + return get_base_reward_from_total_balance(state, get_previous_total_balance(state), index) +``` + ```python def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: if epochs_since_finality <= 4: @@ -1853,10 +1827,9 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): rewards[index] += base_reward * total_attesting_balance // total_balance # Inclusion speed bonus - rewards[index] += ( - base_reward * MIN_ATTESTATION_INCLUSION_DELAY // - inclusion_distance(state, index) - ) + earliest_attestation = earliest_attestation(state, state.previous_epoch_attestations, index) + inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot + rewards[index] += base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay else: penalties[index] += base_reward # Expected FFG target @@ -1869,10 +1842,6 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ rewards[index] += base_reward * matching_head_balance // total_balance else: penalties[index] += base_reward - # Proposer bonus - if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): - proposer_index = get_beacon_proposer_index(state, 
inclusion_slot(state, index)) - rewards[proposer_index] += base_reward // PROPOSER_REWARD_QUOTIENT # Take away max rewards if we're not finalizing if epochs_since_finality > 4: penalties[index] += base_reward * 4 @@ -1909,14 +1878,8 @@ def apply_rewards(state: BeaconState) -> None: rewards1, penalties1 = get_justification_and_finalization_deltas(state) rewards2, penalties2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): - set_balance( - state, - i, - max( - 0, - get_balance(state, i) + rewards1[i] + rewards2[i] - penalties1[i] - penalties2[i], - ), - ) + increase_balance(state, i, rewards1[i] + rewards2[i]) + decrease_balance(state, i, penalties1[i] + penalties2[i]) ``` #### Balance-driven status transitions @@ -2042,7 +2005,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) # Verify proposer is not slashed - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + proposer = state.validator_registry[get_beacon_proposer_index(state)] assert not proposer.slashed # Verify proposer signature assert bls_verify( @@ -2057,7 +2020,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: ```python def process_randao(state: BeaconState, block: BeaconBlock) -> None: - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + proposer = state.validator_registry[get_beacon_proposer_index(state)] # Verify that the provided randao value is valid assert bls_verify( pubkey=proposer.pubkey, @@ -2205,6 +2168,17 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: state.previous_epoch_attestations.append(pending_attestation) ``` +Run `process_proposer_attestation_rewards(state)`. + +```python +def process_proposer_attestation_rewards(state: BeaconState) -> None: + for pending_attestations in (state.previous_epoch_attestations, state.current_epoch_attestations): + for index in get_unslashed_attesting_indices(state, pending_attestations): + if earliest_attestation(state, pending_attestations, index).inclusion_slot == state.slot: + base_reward = get_base_reward_from_total_balance(state, get_current_total_balance(state), index) + increase_balance(state, get_beacon_proposer_index(state), base_reward // PROPOSER_REWARD_QUOTIENT) +``` + ##### Deposits Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. 
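A minimal sketch of the deposit-count check described above (illustrative only, not part of the patch; the helper name mirrors `expected_deposit_count` used in the pyspec's `state_transition.py`, whose exact definition may differ):

```python
def expected_deposit_count(state: BeaconState) -> int:
    # A block must include every deposit the beacon chain knows about but has not
    # yet processed, capped at MAX_DEPOSITS per block.
    return min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)


def verify_block_deposit_count(state: BeaconState, block: BeaconBlock) -> None:
    assert len(block.body.deposits) == expected_deposit_count(state)
```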
@@ -2350,7 +2324,7 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: # Process the transfer decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) increase_balance(state, transfer.recipient, transfer.amount) - increase_balance(state, get_beacon_proposer_index(state, state.slot), transfer.fee) + increase_balance(state, get_beacon_proposer_index(state), transfer.fee) # Verify balances are not dust assert not (0 < get_balance(state, transfer.sender) < MIN_DEPOSIT_AMOUNT) assert not (0 < get_balance(state, transfer.recipient) < MIN_DEPOSIT_AMOUNT) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 6399a13c9..138e69fee 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -283,7 +283,7 @@ def process_custody_reveal(state: BeaconState, assert is_active_validator(revealer, get_current_epoch(state)) or revealer.exit_epoch > get_current_epoch(state) revealer.custody_reveal_index += 1 revealer.max_reveal_lateness = max(revealer.max_reveal_lateness, current_custody_period - reveal.period) - proposer_index = get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) # Case 2: masked punitive early reveal @@ -323,7 +323,7 @@ def process_chunk_challenge(state: BeaconState, # Add new chunk challenge record state.custody_chunk_challenge_records.append(CustodyChunkChallengeRecord( challenge_index=state.custody_challenge_index, - challenger_index=get_beacon_proposer_index(state, state.slot), + challenger_index=get_beacon_proposer_index(state), responder_index=challenge.responder_index deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE, crosslink_data_root=challenge.attestation.data.crosslink_data_root, @@ -436,7 +436,7 @@ def process_chunk_challenge_response(state: BeaconState, # Clear the challenge state.custody_chunk_challenge_records.remove(challenge) # Reward the proposer - proposer_index = get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) ``` diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 60d283664..f29e23390 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -369,24 +369,23 @@ def get_committee_assignment( return assignment ``` -A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the epoch of the slot in question and can not reliably be used to predict an epoch in advance. +A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question and can not reliably be used to predict in advance. 
```python def is_proposer_at_slot(state: BeaconState, slot: Slot, validator_index: ValidatorIndex) -> bool: - current_epoch = get_current_epoch(state) - assert slot_to_epoch(slot) == current_epoch + assert state.slot == slot - return get_beacon_proposer_index(state, slot) == validator_index + return get_beacon_proposer_index(state) == validator_index ``` -_Note_: If a validator is assigned to the 0th slot of an epoch, the validator must run an empty slot transition from the previous epoch into the 0th slot of the epoch to be able to check if they are a proposer at that slot. +_Note_: To see if a validator is assigned to proposer during the slot, the validator must run an empty slot transition from the previous state to the current slot. ### Lookahead -The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the epoch in question. +The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the slot in question. `get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in phase 1+). diff --git a/test_libs/pyspec/eth2spec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py index 9be192c1f..94cd35b99 100644 --- a/test_libs/pyspec/eth2spec/phase0/state_transition.py +++ b/test_libs/pyspec/eth2spec/phase0/state_transition.py @@ -10,7 +10,8 @@ from typing import ( from .spec import ( BeaconState, BeaconBlock, - Slot + Slot, + process_proposer_attestation_rewards, ) @@ -51,6 +52,7 @@ def process_operations(state: BeaconState, block: BeaconBlock) -> None: spec.MAX_ATTESTATIONS, spec.process_attestation, ) + process_proposer_attestation_rewards(state) assert len(block.body.deposits) == expected_deposit_count(state) process_operation_type( @@ -112,4 +114,3 @@ def state_transition(state: BeaconState, verify_state_root: bool=False) -> BeaconState: state_transition_to(state, block.slot) process_block(state, block, verify_state_root) - From 587193076e2698a3ce82c223c24b92ef5036c1d6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 17 Apr 2019 11:35:37 +0800 Subject: [PATCH 354/481] Minor adjustments (#948) 1. Rename `earliest_attestation` to `get_earliest_attestation` to avoiding conflicting to variable name 2. 
Extract `proposer_index` out of `process_proposer_attestation_rewards` loops --- specs/core/0_beacon-chain.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 5adafbe2e..ddb0eb6db 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1683,7 +1683,7 @@ def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple ``` ```python -def earliest_attestation(state: BeaconState, attestations: List[PendingAttestation], index: ValidatorIndex) -> PendingAttestation: +def get_earliest_attestation(state: BeaconState, attestations: List[PendingAttestation], index: ValidatorIndex) -> PendingAttestation: return min([ a for a in attestations if index in get_attestation_participants(state, a.data, a.aggregation_bitfield) ], key=lambda a: a.inclusion_slot) @@ -1827,7 +1827,7 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): rewards[index] += base_reward * total_attesting_balance // total_balance # Inclusion speed bonus - earliest_attestation = earliest_attestation(state, state.previous_epoch_attestations, index) + earliest_attestation = get_earliest_attestation(state, state.previous_epoch_attestations, index) inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot rewards[index] += base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay else: @@ -2172,11 +2172,12 @@ Run `process_proposer_attestation_rewards(state)`. ```python def process_proposer_attestation_rewards(state: BeaconState) -> None: + proposer_index = get_beacon_proposer_index(state) for pending_attestations in (state.previous_epoch_attestations, state.current_epoch_attestations): for index in get_unslashed_attesting_indices(state, pending_attestations): - if earliest_attestation(state, pending_attestations, index).inclusion_slot == state.slot: + if get_earliest_attestation(state, pending_attestations, index).inclusion_slot == state.slot: base_reward = get_base_reward_from_total_balance(state, get_current_total_balance(state), index) - increase_balance(state, get_beacon_proposer_index(state), base_reward // PROPOSER_REWARD_QUOTIENT) + increase_balance(state, proposer_index, base_reward // PROPOSER_REWARD_QUOTIENT) ``` ##### Deposits From 6f56c379d6cca55c9181eb825b2c4dccacd89fca Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 17 Apr 2019 14:06:28 +1000 Subject: [PATCH 355/481] Simplify get_justification_and_finalization_deltas Cosmetic changes related to `get_justification_and_finalization_deltas`: * Review naming of misc helper functions and variables * Abstract away common logic and rework for readability * Add `MAX_FINALITY_LOOKBACK` and `BASE_REWARDS_PER_EPOCH` constants * Rescale `INACTIVITY_PENALTY_QUOTIENT` Substantive changes: * Make logic relative to `previous_epoch` throughout (as opposed to mixing `current_epoch` and `previous_epoch`) * Replace inclusion delay bonus by an inclusion delay penalty --- specs/core/0_beacon-chain.md | 144 +++++++++++++++-------------------- 1 file changed, 63 insertions(+), 81 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ddb0eb6db..706f9154f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -185,6 +185,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) 
| | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | +| `BASE_REWARDS_PER_EPOCH` | `5` | | `SHUFFLE_ROUND_COUNT` | 90 | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) @@ -234,6 +235,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | | `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours | +| `MAX_FINALITY_LOOKBACK` | `2**2` (= 4) | epochs | 25.6 minutes | * `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` @@ -252,7 +254,7 @@ These configurations are updated for releases, but may be out of sync during `de | `BASE_REWARD_QUOTIENT` | `2**5` (= 32) | | `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) | | `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) | -| `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) | +| `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) | | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | * The `BASE_REWARD_QUOTIENT` parameter dictates the per-epoch reward. It corresponds to ~2.54% annual interest assuming 10 million participating ETH in every epoch. @@ -875,7 +877,7 @@ def compute_committee(validator_indices: List[ValidatorIndex], ] ``` -**Note**: this definition and the next few definitions are highly inefficient as algorithms, as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. +Note: this definition and the next few definitions are highly inefficient as algorithms, as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. ### `get_crosslink_committees_at_slot` @@ -1582,7 +1584,7 @@ Transition section notes: Beacon blocks that trigger unhandled Python exceptions (e.g. out-of-range list accesses) and failed `assert`s during the state transition are considered invalid. -_Note_: If there are skipped slots between a block and its parent block, run the steps in the [state-root](#state-caching), [per-epoch](#per-epoch-processing), and [per-slot](#per-slot-processing) sections once for each skipped slot and then once for the slot containing the new block. +Note: If there are skipped slots between a block and its parent block, run the steps in the [state-root](#state-caching), [per-epoch](#per-epoch-processing), and [per-slot](#per-slot-processing) sections once for each skipped slot and then once for the slot containing the new block. 
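A minimal sketch of the skipped-slot handling described in the note above (illustrative only, not part of the patch; the wrapper name `process_slots_until` is hypothetical, and `process_epoch_transition` refers to the pyspec wrapper over the per-epoch steps):

```python
def process_slots_until(state: BeaconState, slot: Slot) -> None:
    # For each skipped slot, and finally for the slot containing the new block:
    # run state caching, then the per-epoch steps at epoch boundaries, then advance the slot.
    while state.slot < slot:
        cache_state(state)
        if state.slot > GENESIS_SLOT and (state.slot + 1) % SLOTS_PER_EPOCH == 0:
            process_epoch_transition(state)
        advance_slot(state)
```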
### State caching @@ -1612,30 +1614,20 @@ The steps below happen when `state.slot > GENESIS_SLOT and (state.slot + 1) % SL We define epoch transition helper functions: ```python -def get_current_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state))) -``` - -```python -def get_previous_total_balance(state: BeaconState) -> Gwei: +def get_previous_epoch_total_balance(state: BeaconState) -> Gwei: return get_total_balance(state, get_active_validator_indices(state, get_previous_epoch(state))) ``` -```python -def get_unslashed_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: - output = set() - for a in attestations: - output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) - return sorted(filter(lambda index: not state.validator_registry[index].slashed, list(output))) -``` +Note: The balance computed by `get_previous_epoch_total_balance` may be different to the actual total balance during the previous epoch transition. Due to the bounds on per-epoch validator churn and per-epoch rewards/penalties, the maximum balance difference is low and only marginally affects consensus safety. ```python -def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: - return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) +def get_current_epoch_total_balance(state: BeaconState) -> Gwei: + return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state))) ``` + ```python -def get_current_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: +def get_current_epoch_matching_target_attestations(state: BeaconState) -> List[PendingAttestation]: return [ a for a in state.current_epoch_attestations if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_current_epoch(state))) @@ -1643,7 +1635,7 @@ def get_current_epoch_boundary_attestations(state: BeaconState) -> List[PendingA ``` ```python -def get_previous_epoch_boundary_attestations(state: BeaconState) -> List[PendingAttestation]: +def get_previous_epoch_matching_target_attestations(state: BeaconState) -> List[PendingAttestation]: return [ a for a in state.previous_epoch_attestations if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) @@ -1658,7 +1650,18 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe ] ``` -**Note**: Total balances computed for the previous epoch might be marginally different than the actual total balances during the previous epoch transition. Due to the tight bound on validator churn each epoch and small per-epoch rewards/penalties, the potential balance difference is very low and only marginally affects consensus safety. 
+```python +def get_unslashed_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: + output = set() + for a in attestations: + output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) + return sorted(filter(lambda index: not state.validator_registry[index].slashed, list(output))) +``` + +```python +def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: + return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) +``` ```python def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: @@ -1701,13 +1704,13 @@ def update_justification_and_finalization(state: BeaconState) -> None: state.previous_justified_epoch = state.current_justified_epoch state.previous_justified_root = state.current_justified_root state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 - previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) - if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: + previous_epoch_matching_target_balance = get_attesting_balance(state, get_previous_epoch_matching_target_attestations(state)) + if previous_epoch_matching_target_balance * 3 >= get_previous_epoch_total_balance(state) * 2: state.current_justified_epoch = get_previous_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 1) - current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) - if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: + current_epoch_matching_target_balance = get_attesting_balance(state, get_current_epoch_matching_target_attestations(state)) + if current_epoch_matching_target_balance * 3 >= get_current_epoch_total_balance(state) * 2: state.current_justified_epoch = get_current_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 0) @@ -1771,80 +1774,58 @@ def maybe_reset_eth1_period(state: BeaconState) -> None: #### Rewards and penalties -First, we define some additional helpers: +We first define a helper: ```python -def get_base_reward_from_total_balance(state: BeaconState, total_balance: Gwei, index: ValidatorIndex) -> Gwei: +def get_base_reward(state: BeaconState, total_balance: Gwei, index: ValidatorIndex) -> Gwei: if total_balance == 0: return 0 adjusted_quotient = integer_squareroot(total_balance) // BASE_REWARD_QUOTIENT - return get_effective_balance(state, index) // adjusted_quotient // 5 + return get_effective_balance(state, index) // adjusted_quotient // BASE_REWARDS_PER_EPOCH ``` -```python -def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: - return get_base_reward_from_total_balance(state, get_previous_total_balance(state), index) -``` - -```python -def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_since_finality: int) -> Gwei: - if epochs_since_finality <= 4: - extra_penalty = 0 - else: - extra_penalty = get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT // 2 - return get_base_reward(state, index) + extra_penalty -``` - -Note: When applying penalties in the following balance recalculations, implementers should 
make sure the `uint64` does not underflow. - ##### Justification and finalization ```python def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - current_epoch = get_current_epoch(state) - epochs_since_finality = current_epoch + 1 - state.finalized_epoch - rewards = [0 for index in range(len(state.validator_registry))] - penalties = [0 for index in range(len(state.validator_registry))] - # Some helper variables - boundary_attestations = get_previous_epoch_boundary_attestations(state) - boundary_attesting_balance = get_attesting_balance(state, boundary_attestations) - total_balance = get_previous_total_balance(state) - total_attesting_balance = get_attesting_balance(state, state.previous_epoch_attestations) - matching_head_attestations = get_previous_epoch_matching_head_attestations(state) - matching_head_balance = get_attesting_balance(state, matching_head_attestations) + previous_epoch = get_previous_epoch(state) eligible_validators = [ index for index, validator in enumerate(state.validator_registry) if ( - is_active_validator(validator, current_epoch) or - (validator.slashed and current_epoch < validator.withdrawable_epoch) + is_active_validator(validator, previous_epoch) or + (validator.slashed and previous_epoch < validator.withdrawable_epoch) ) ] - # Process rewards or penalties for all validators + rewards = [0 for index in range(len(state.validator_registry))] + penalties = [0 for index in range(len(state.validator_registry))] for index in eligible_validators: - base_reward = get_base_reward(state, index) - # Expected FFG source + base_reward = get_base_reward(state, get_previous_epoch_total_balance(state), index) + + # Micro-incentives for matching FFG source, matching FFG target, and matching head + for attestations in ( + state.previous_epoch_attestations, # Matching FFG source + get_previous_epoch_matching_target_attestations(state), # Matching FFG target + get_previous_epoch_matching_head_attestations(state), # Matching head + ): + if index in get_unslashed_attesting_indices(state, attestations): + rewards[index] += base_reward * get_attesting_balance(state, attestations) // get_previous_epoch_total_balance(state) + else: + penalties[index] += base_reward + + # Inclusion delay micro-penalty if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): - rewards[index] += base_reward * total_attesting_balance // total_balance - # Inclusion speed bonus earliest_attestation = get_earliest_attestation(state, state.previous_epoch_attestations, index) inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot - rewards[index] += base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay - else: - penalties[index] += base_reward - # Expected FFG target - if index in get_unslashed_attesting_indices(state, boundary_attestations): - rewards[index] += base_reward * boundary_attesting_balance // total_balance - else: - penalties[index] += get_inactivity_penalty(state, index, epochs_since_finality) - # Expected head - if index in get_unslashed_attesting_indices(state, matching_head_attestations): - rewards[index] += base_reward * matching_head_balance // total_balance - else: - penalties[index] += base_reward - # Take away max rewards if we're not finalizing - if epochs_since_finality > 4: - penalties[index] += base_reward * 4 + penalties[index] += base_reward * (1 - MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay) + + # Inactivity penalty + epochs_since_finality = previous_epoch + 
1 - state.finalized_epoch + if epochs_since_finality > MAX_FINALITY_LOOKBACK: + penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward + if index not in get_unslashed_attesting_indices(state, get_previous_epoch_matching_target_attestations(state)): + penalties[index] += get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT + return [rewards, penalties] ``` @@ -1862,10 +1843,11 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: participating_balance = get_total_balance(state, participants) total_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: + base_reward = get_base_reward(state, get_previous_epoch_total_balance(state), index) if index in participants: - rewards[index] += get_base_reward(state, index) * participating_balance // total_balance + rewards[index] += base_reward * participating_balance // total_balance else: - penalties[index] += get_base_reward(state, index) + penalties[index] += base_reward return [rewards, penalties] ``` @@ -2176,7 +2158,7 @@ def process_proposer_attestation_rewards(state: BeaconState) -> None: for pending_attestations in (state.previous_epoch_attestations, state.current_epoch_attestations): for index in get_unslashed_attesting_indices(state, pending_attestations): if get_earliest_attestation(state, pending_attestations, index).inclusion_slot == state.slot: - base_reward = get_base_reward_from_total_balance(state, get_current_total_balance(state), index) + base_reward = get_base_reward(state, get_current_epoch_total_balance(state), index) increase_balance(state, proposer_index, base_reward // PROPOSER_REWARD_QUOTIENT) ``` From 09fe642e0be1486b355cc27104e04d944159f5c2 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Wed, 17 Apr 2019 05:13:38 +0100 Subject: [PATCH 356/481] Fix for Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7ca2cc909..e679f225d 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) # runs a limited set of tests against a minimal config test: $(PY_SPEC_ALL_TARGETS) - cd $(PY_TEST_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; pytest -m minimal_config . + cd $(PY_TEST_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python -m pytest -m minimal_config . # "make pyspec" to create the pyspec for all phases. pyspec: $(PY_SPEC_ALL_TARGETS) From 57e54093837cba740753bd453c7067f6900c086b Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 17 Apr 2019 14:30:03 +1000 Subject: [PATCH 357/481] Simplify Eth1Data voting (#938) Remove `Eth1DataVote` object and simplify logic throughout. 
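For illustration (not part of the patch): each block appends one vote, and an `Eth1Data` value is adopted as `latest_eth1_data` once it has been voted for in strictly more than half of the voting period's slots. With the mainnet preset `SLOTS_PER_ETH1_VOTING_PERIOD = 1024` set in the follow-up configuration patch, that means at least 513 matching votes:

```python
# Worked example of the adoption threshold (illustrative values)
SLOTS_PER_ETH1_VOTING_PERIOD = 1024
matching_votes = 513
assert matching_votes * 2 > SLOTS_PER_ETH1_VOTING_PERIOD  # 1026 > 1024, so the value is adopted
```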
--- specs/core/0_beacon-chain.md | 45 ++++--------------- .../eth2spec/phase0/state_transition.py | 1 - 2 files changed, 8 insertions(+), 38 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ddb0eb6db..edd52b2e3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -25,7 +25,6 @@ - [`Fork`](#fork) - [`Crosslink`](#crosslink) - [`Eth1Data`](#eth1data) - - [`Eth1DataVote`](#eth1datavote) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - [`IndexedAttestation`](#indexedattestation) @@ -116,7 +115,6 @@ - [Helper functions](#helper-functions-1) - [Justification](#justification) - [Crosslinks](#crosslinks) - - [Eth1 data](#eth1-data) - [Rewards and penalties](#rewards-and-penalties) - [Justification and finalization](#justification-and-finalization) - [Crosslinks](#crosslinks-1) @@ -229,7 +227,7 @@ These configurations are updated for releases, but may be out of sync during `de | `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes | | `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes | | `ACTIVATION_EXIT_DELAY` | `2**2` (= 4) | epochs | 25.6 minutes | -| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**4` (= 16) | epochs | ~1.7 hours | +| `SLOTS_PER_ETH1_VOTING_PERIOD` | `2**10` (= 1,024) | slots | ~1.7 hours | | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | @@ -325,17 +323,6 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `Eth1DataVote` - -```python -{ - # Data being voted for - 'eth1_data': Eth1Data, - # Vote count - 'vote_count': 'uint64', -} -``` - #### `AttestationData` ```python @@ -615,7 +602,7 @@ The types are defined topologically to aid in facilitating an executable version # Ethereum 1.0 chain data 'latest_eth1_data': Eth1Data, - 'eth1_data_votes': [Eth1DataVote], + 'eth1_data_votes': [Eth1Data], 'deposit_index': 'uint64', } ``` @@ -1754,21 +1741,6 @@ def process_crosslinks(state: BeaconState) -> None: ) ``` -#### Eth1 data - -Run the following function: - -```python -def maybe_reset_eth1_period(state: BeaconState) -> None: - if (get_current_epoch(state) + 1) % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: - for eth1_data_vote in state.eth1_data_votes: - # If a majority of all votes were for a particular eth1_data value, - # then set that as the new canonical value - if eth1_data_vote.vote_count * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: - state.latest_eth1_data = eth1_data_vote.eth1_data - state.eth1_data_votes = [] -``` - #### Rewards and penalties First, we define some additional helpers: @@ -1958,6 +1930,9 @@ Run the following function: def finish_epoch_update(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = current_epoch + 1 + # Reset eth1 data votes + if state.slot % SLOTS_PER_ETH1_VOTING_PERIOD == 0: + state.eth1_data_votes = [] # Set active index root index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH state.latest_active_index_roots[index_root_position] = hash_tree_root( @@ -2039,13 +2014,9 @@ def process_randao(state: BeaconState, block: BeaconBlock) -> None: ```python def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: - for eth1_data_vote in state.eth1_data_votes: - # If someone else has already voted for the same hash, add to its 
counter - if eth1_data_vote.eth1_data == block.body.eth1_data: - eth1_data_vote.vote_count += 1 - return - # If we're seeing this hash for the first time, make a new counter - state.eth1_data_votes.append(Eth1DataVote(eth1_data=block.body.eth1_data, vote_count=1)) + state.eth1_data_votes.append(block.body.eth1_data) + if state.eth1_data_votes.count(block.body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD: + state.latest_eth1_data = block.body.eth1_data ``` #### Operations diff --git a/test_libs/pyspec/eth2spec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py index 94cd35b99..f43e2791b 100644 --- a/test_libs/pyspec/eth2spec/phase0/state_transition.py +++ b/test_libs/pyspec/eth2spec/phase0/state_transition.py @@ -93,7 +93,6 @@ def process_block(state: BeaconState, def process_epoch_transition(state: BeaconState) -> None: spec.update_justification_and_finalization(state) spec.process_crosslinks(state) - spec.maybe_reset_eth1_period(state) spec.apply_rewards(state) spec.process_balance_driven_status_transitions(state) spec.update_registry(state) From dbb112b237669c9b4a8319994df57cddba234eb9 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Wed, 17 Apr 2019 15:23:14 +1000 Subject: [PATCH 358/481] update constants for eth1 data voting to match spec (#952) --- configs/constant_presets/mainnet.yaml | 4 ++-- configs/constant_presets/minimal.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml index e67cf79ce..d06febb77 100644 --- a/configs/constant_presets/mainnet.yaml +++ b/configs/constant_presets/mainnet.yaml @@ -62,8 +62,8 @@ SLOTS_PER_EPOCH: 64 MIN_SEED_LOOKAHEAD: 1 # 2**2 (= 4) epochs 25.6 minutes ACTIVATION_EXIT_DELAY: 4 -# 2**4 (= 16) epochs ~1.7 hours -EPOCHS_PER_ETH1_VOTING_PERIOD: 16 +# 2**10 (= 1,024) slots ~1.7 hours +SLOTS_PER_ETH1_VOTING_PERIOD: 1024 # 2**13 (= 8,192) slots ~13 hours SLOTS_PER_HISTORICAL_ROOT: 8192 # 2**8 (= 256) epochs ~27 hours diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index 91ab7b358..80af5398c 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -63,7 +63,7 @@ MIN_SEED_LOOKAHEAD: 1 # 2**2 (= 4) epochs 25.6 minutes ACTIVATION_EXIT_DELAY: 4 # [customized] higher frequency new deposits from eth1 for testing -EPOCHS_PER_ETH1_VOTING_PERIOD: 2 +SLOTS_PER_ETH1_VOTING_PERIOD: 16 # [customized] smaller state SLOTS_PER_HISTORICAL_ROOT: 64 # 2**8 (= 256) epochs ~27 hours From cc92ee9f67877e4c286c0e0df12dab26b0356717 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 17 Apr 2019 15:53:24 +1000 Subject: [PATCH 359/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 706f9154f..d7f7095e4 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -185,7 +185,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | -| `BASE_REWARDS_PER_EPOCH` | `5` | +| `BASE_REWARDS_PER_EPOCH` | `4` | | `SHUFFLE_ROUND_COUNT` | 90 | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at 
least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) From 90cf8738bfa406aee587cf26073532ca7c50d006 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Apr 2019 17:47:56 +1000 Subject: [PATCH 360/481] Move pytests for faster dev iteration --- Makefile | 5 ++- README.md | 3 +- py_tests/README.md | 30 ---------------- py_tests/requirements.txt | 7 ---- test_libs/pyspec/README.md | 35 +++++++++++++++++-- test_libs/pyspec/requirements.txt | 1 + test_libs/pyspec/setup.py | 1 + .../pyspec/tests/README.md | 0 test_libs/pyspec/tests/__init__.py | 0 .../test_process_attestation.py | 2 +- .../test_process_attester_slashing.py | 2 +- .../test_process_block_header.py | 2 +- .../block_processing/test_process_deposit.py | 2 +- .../test_process_proposer_slashing.py | 2 +- .../block_processing/test_voluntary_exit.py | 2 +- .../pyspec/tests}/conftest.py | 0 .../pyspec/tests}/helpers.py | 0 .../pyspec/tests}/test_sanity.py | 0 18 files changed, 44 insertions(+), 50 deletions(-) delete mode 100644 py_tests/README.md delete mode 100644 py_tests/requirements.txt rename py_tests/phase0/__init__.py => test_libs/pyspec/tests/README.md (100%) create mode 100644 test_libs/pyspec/tests/__init__.py rename {py_tests/phase0 => test_libs/pyspec/tests}/block_processing/test_process_attestation.py (99%) rename {py_tests/phase0 => test_libs/pyspec/tests}/block_processing/test_process_attester_slashing.py (99%) rename {py_tests/phase0 => test_libs/pyspec/tests}/block_processing/test_process_block_header.py (98%) rename {py_tests/phase0 => test_libs/pyspec/tests}/block_processing/test_process_deposit.py (99%) rename {py_tests/phase0 => test_libs/pyspec/tests}/block_processing/test_process_proposer_slashing.py (99%) rename {py_tests/phase0 => test_libs/pyspec/tests}/block_processing/test_voluntary_exit.py (99%) rename {py_tests/phase0 => test_libs/pyspec/tests}/conftest.py (100%) rename {py_tests/phase0 => test_libs/pyspec/tests}/helpers.py (100%) rename {py_tests/phase0 => test_libs/pyspec/tests}/test_sanity.py (100%) diff --git a/Makefile b/Makefile index e679f225d..b39538791 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,6 @@ SPEC_DIR = ./specs SCRIPT_DIR = ./scripts TEST_LIBS_DIR = ./test_libs PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec -PY_TEST_DIR = ./py_tests YAML_TEST_DIR = ./yaml_tests GENERATOR_DIR = ./test_generators CONFIGS_DIR = ./configs @@ -24,7 +23,7 @@ all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) clean: rm -rf $(YAML_TEST_DIR) rm -rf $(GENERATOR_VENVS) - rm -rf $(PY_TEST_DIR)/venv $(PY_TEST_DIR)/.pytest_cache + rm -rf $(PY_SPEC_DIR)/venv $(PY_SPEC_DIR)/.pytest_cache rm -rf $(PY_SPEC_ALL_TARGETS) # "make gen_yaml_tests" to run generators @@ -32,7 +31,7 @@ gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) # runs a limited set of tests against a minimal config test: $(PY_SPEC_ALL_TARGETS) - cd $(PY_TEST_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python -m pytest -m minimal_config . + cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python -m pytest -m minimal_config . # "make pyspec" to create the pyspec for all phases. 
pyspec: $(PY_SPEC_ALL_TARGETS) diff --git a/README.md b/README.md index d1aaab74e..aa5b7e302 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,5 @@ The following are the broad design goals for Ethereum 2.0: Documentation on the different components used during spec writing can be found here: * [YAML Test Generators](test_generators/README.md) -* [Executable Python Spec](test_libs/pyspec/README.md) -* [Py-tests](py_tests/README.md) +* [Executable Python Spec, with Py-tests](test_libs/pyspec/README.md) diff --git a/py_tests/README.md b/py_tests/README.md deleted file mode 100644 index ca2bed4cc..000000000 --- a/py_tests/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# ETH 2.0 py-tests - -These tests are not intended for client-consumption. -These tests are sanity tests, to verify if the spec itself is consistent. - -There are ideas to port these tests to the YAML test suite, - but we are still looking for inputs on how this should work. - -## How to run tests - -### Automated - -Run `make test` from the root of the spec repository. - -### Manual - -From within the py_tests folder: - -Install dependencies: -```bash -python3 -m venv venv -. venv/bin/activate -pip3 install -r requirements.txt -``` -Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement. - -Run the tests: -``` -pytest -m minimal_config . -``` diff --git a/py_tests/requirements.txt b/py_tests/requirements.txt deleted file mode 100644 index 27b3f22d8..000000000 --- a/py_tests/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -eth-utils>=1.3.0,<2 -eth-typing>=2.1.0,<3.0.0 -oyaml==0.7 -pycryptodome==3.7.3 -py_ecc>=1.6.0 -pytest>=3.6,<3.7 -../test_libs/pyspec diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index 08042e746..b3cab11d2 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -7,6 +7,7 @@ With this executable spec, test-generators can easily create test-vectors for client implementations, and the spec itself can be verified to be consistent and coherent, through sanity tests implemented with pytest. + ## Building All the dynamic parts of the spec can be build at once with `make pyspec`. @@ -15,12 +16,42 @@ Alternatively, you can build a sub-set of the pyspec: `make phase0`. Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2spec/phase0/spec.py` + +## Py-tests + +These tests are not intended for client-consumption. +These tests are sanity tests, to verify if the spec itself is consistent. + +### How to run tests + +#### Automated + +Run `make test` from the root of the spec repository. + +#### Manual + +From within the `pyspec` folder: + +Install dependencies: +```bash +python3 -m venv venv +. venv/bin/activate +pip3 install -r requirements.txt +``` +Note: make sure to run `make pyspec` from the root of the specs repository, + to build the parts of the pyspec module derived from the markdown specs. + +Run the tests: +``` +pytest -m minimal_config . +``` + + ## Contributing Contributions are welcome, but consider implementing your idea as part of the spec itself first. The pyspec is not a replacement. -If you see opportunity to include any of the `pyspec/eth2spec/utils/` code in the spec, - please submit an issue or PR. 
+ ## License diff --git a/test_libs/pyspec/requirements.txt b/test_libs/pyspec/requirements.txt index 78d41708d..3296ef807 100644 --- a/test_libs/pyspec/requirements.txt +++ b/test_libs/pyspec/requirements.txt @@ -2,3 +2,4 @@ eth-utils>=1.3.0,<2 eth-typing>=2.1.0,<3.0.0 pycryptodome==3.7.3 py_ecc>=1.6.0 +pytest>=3.6,<3.7 diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index b04847d37..1a131a417 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -3,6 +3,7 @@ from setuptools import setup, find_packages setup( name='pyspec', packages=find_packages(), + tests_require=["pytest"], install_requires=[ "eth-utils>=1.3.0,<2", "eth-typing>=2.1.0,<3.0.0", diff --git a/py_tests/phase0/__init__.py b/test_libs/pyspec/tests/README.md similarity index 100% rename from py_tests/phase0/__init__.py rename to test_libs/pyspec/tests/README.md diff --git a/test_libs/pyspec/tests/__init__.py b/test_libs/pyspec/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/py_tests/phase0/block_processing/test_process_attestation.py b/test_libs/pyspec/tests/block_processing/test_process_attestation.py similarity index 99% rename from py_tests/phase0/block_processing/test_process_attestation.py rename to test_libs/pyspec/tests/block_processing/test_process_attestation.py index 2e3f24dd6..d66434c2c 100644 --- a/py_tests/phase0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/tests/block_processing/test_process_attestation.py @@ -11,7 +11,7 @@ from eth2spec.phase0.spec import ( process_attestation, slot_to_epoch, ) -from phase0.helpers import ( +from tests.helpers import ( build_empty_block_for_next_slot, get_valid_attestation, ) diff --git a/py_tests/phase0/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py similarity index 99% rename from py_tests/phase0/block_processing/test_process_attester_slashing.py rename to test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py index 8db71deb9..9417a6ffa 100644 --- a/py_tests/phase0/block_processing/test_process_attester_slashing.py +++ b/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py @@ -7,7 +7,7 @@ from eth2spec.phase0.spec import ( get_beacon_proposer_index, process_attester_slashing, ) -from phase0.helpers import ( +from tests.helpers import ( get_valid_attester_slashing, ) diff --git a/py_tests/phase0/block_processing/test_process_block_header.py b/test_libs/pyspec/tests/block_processing/test_process_block_header.py similarity index 98% rename from py_tests/phase0/block_processing/test_process_block_header.py rename to test_libs/pyspec/tests/block_processing/test_process_block_header.py index 3b99f2ad4..b35b0a9c1 100644 --- a/py_tests/phase0/block_processing/test_process_block_header.py +++ b/test_libs/pyspec/tests/block_processing/test_process_block_header.py @@ -8,7 +8,7 @@ from eth2spec.phase0.spec import ( advance_slot, process_block_header, ) -from phase0.helpers import ( +from tests.helpers import ( build_empty_block_for_next_slot, next_slot, ) diff --git a/py_tests/phase0/block_processing/test_process_deposit.py b/test_libs/pyspec/tests/block_processing/test_process_deposit.py similarity index 99% rename from py_tests/phase0/block_processing/test_process_deposit.py rename to test_libs/pyspec/tests/block_processing/test_process_deposit.py index cd682a4d4..4031e650d 100644 --- a/py_tests/phase0/block_processing/test_process_deposit.py +++ 
b/test_libs/pyspec/tests/block_processing/test_process_deposit.py @@ -8,7 +8,7 @@ from eth2spec.phase0.spec import ( ZERO_HASH, process_deposit, ) -from phase0.helpers import ( +from tests.helpers import ( build_deposit, privkeys, pubkeys, diff --git a/py_tests/phase0/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py similarity index 99% rename from py_tests/phase0/block_processing/test_process_proposer_slashing.py rename to test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py index d7afd2750..6f9dee262 100644 --- a/py_tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py @@ -7,7 +7,7 @@ from eth2spec.phase0.spec import ( get_current_epoch, process_proposer_slashing, ) -from phase0.helpers import ( +from tests.helpers import ( get_valid_proposer_slashing, ) diff --git a/py_tests/phase0/block_processing/test_voluntary_exit.py b/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py similarity index 99% rename from py_tests/phase0/block_processing/test_voluntary_exit.py rename to test_libs/pyspec/tests/block_processing/test_voluntary_exit.py index 4404e7255..c58c5238a 100644 --- a/py_tests/phase0/block_processing/test_voluntary_exit.py +++ b/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py @@ -9,7 +9,7 @@ from eth2spec.phase0.spec import ( get_current_epoch, process_voluntary_exit, ) -from phase0.helpers import ( +from tests.helpers import ( build_voluntary_exit, pubkey_to_privkey, ) diff --git a/py_tests/phase0/conftest.py b/test_libs/pyspec/tests/conftest.py similarity index 100% rename from py_tests/phase0/conftest.py rename to test_libs/pyspec/tests/conftest.py diff --git a/py_tests/phase0/helpers.py b/test_libs/pyspec/tests/helpers.py similarity index 100% rename from py_tests/phase0/helpers.py rename to test_libs/pyspec/tests/helpers.py diff --git a/py_tests/phase0/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py similarity index 100% rename from py_tests/phase0/test_sanity.py rename to test_libs/pyspec/tests/test_sanity.py From a4d87d44fb664d68a5d6500bb5ecf23eedcc32ba Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 17 Apr 2019 22:22:53 +1000 Subject: [PATCH 361/481] Remove custody_bitfield from PendingAttestation I don't think we need it :) --- specs/core/0_beacon-chain.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index edd52b2e3..2e8c0b593 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -425,8 +425,6 @@ The types are defined topologically to aid in facilitating an executable version 'aggregation_bitfield': 'bytes', # Attestation data 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', # Inclusion slot 'inclusion_slot': 'uint64', } @@ -2130,7 +2128,6 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: pending_attestation = PendingAttestation( data=attestation.data, aggregation_bitfield=attestation.aggregation_bitfield, - custody_bitfield=attestation.custody_bitfield, inclusion_slot=state.slot ) if target_epoch == get_current_epoch(state): From 4bffa87646f3f42ca91ac2c0cdf408762d3dfadc Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 17 Apr 2019 08:57:23 -0600 Subject: [PATCH 362/481] fix finalization bug --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index edd52b2e3..6f829821e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1711,11 +1711,11 @@ def update_justification_and_finalization(state: BeaconState) -> None: state.finalized_epoch = antepenultimate_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source - if (bitfield >> 0) % 8 == 0b111 and state.previous_justified_root == current_epoch - 2: + if (bitfield >> 0) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 2: state.finalized_epoch = state.previous_justified_root state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source - if (bitfield >> 0) % 4 == 0b11 and state.previous_justified_root == current_epoch - 1: + if (bitfield >> 0) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 1: state.finalized_epoch = state.previous_justified_root state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) ``` From 73bd821417bd972b886fc33a475b01e897c18cde Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 17 Apr 2019 09:26:23 -0600 Subject: [PATCH 363/481] bug fix --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6f829821e..874d253f8 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1712,11 +1712,11 @@ def update_justification_and_finalization(state: BeaconState) -> None: state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source if (bitfield >> 0) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 2: - state.finalized_epoch = state.previous_justified_root + state.finalized_epoch = state.previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source if (bitfield >> 0) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 1: - state.finalized_epoch = state.previous_justified_root + state.finalized_epoch = state.previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) ``` From af4d06c8bf89acb2f55f5ef73f08b304018d4019 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 17 Apr 2019 09:38:10 -0600 Subject: [PATCH 364/481] only run justification/finalization after 2 epochs --- specs/core/0_beacon-chain.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 83f4ec926..86623dff5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1588,7 +1588,8 @@ def cache_state(state: BeaconState) -> None: state.latest_block_header.state_root = previous_slot_state_root # store latest known block for previous slot - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signing_root(state.latest_block_header) + latest_block_root = signing_root(state.latest_block_header) + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_block_root ``` ### Per-epoch processing @@ -1683,7 +1684,7 @@ Run the 
following function: ```python def update_justification_and_finalization(state: BeaconState) -> None: - if get_current_epoch(state) == GENESIS_EPOCH: + if get_current_epoch(state) <= GENESIS_EPOCH + 1: return antepenultimate_justified_epoch = state.previous_justified_epoch From 5531adcdd152b91f20edc3ce81e7b0d87a5865e2 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 17 Apr 2019 09:41:23 -0600 Subject: [PATCH 365/481] remove old assertion in get_beacon_proposer_index --- specs/core/0_beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 86623dff5..14f2536fc 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -976,7 +976,6 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: Return the beacon proposer index at ``state.slot``. """ current_epoch = get_current_epoch(state) - # assert slot_to_epoch(slot) == current_epoch first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0] i = 0 From c783cdb2f4833d1de4767ab75fb5f5c49ae567e9 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 17 Apr 2019 12:31:00 -0600 Subject: [PATCH 366/481] fix bug and add transfer tests --- specs/core/0_beacon-chain.md | 2 +- .../block_processing/test_process_transfer.py | 143 ++++++++++++++++++ test_libs/pyspec/tests/helpers.py | 50 ++++++ 3 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 test_libs/pyspec/tests/block_processing/test_process_transfer.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6aa562ca4..86a017508 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2280,7 +2280,7 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: assert ( get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH or - transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= get_balance(transfer.sender) + transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= get_balance(state, transfer.sender) ) # Verify that the pubkey is valid assert ( diff --git a/test_libs/pyspec/tests/block_processing/test_process_transfer.py b/test_libs/pyspec/tests/block_processing/test_process_transfer.py new file mode 100644 index 000000000..df49aff98 --- /dev/null +++ b/test_libs/pyspec/tests/block_processing/test_process_transfer.py @@ -0,0 +1,143 @@ +from copy import deepcopy +import pytest + +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.spec import ( + get_active_validator_indices, + get_balance, + get_beacon_proposer_index, + get_current_epoch, + process_transfer, + set_balance, +) +from tests.helpers import ( + get_valid_transfer, + next_epoch, +) + + +# mark entire file as 'transfers' +pytestmark = pytest.mark.transfers + + +def run_transfer_processing(state, transfer, valid=True): + """ + Run ``process_transfer`` returning the pre and post state. 
+ If ``valid == False``, run expecting ``AssertionError`` + """ + post_state = deepcopy(state) + + if not valid: + with pytest.raises(AssertionError): + process_transfer(post_state, transfer) + return state, None + + + process_transfer(post_state, transfer) + + proposer_index = get_beacon_proposer_index(state) + pre_transfer_sender_balance = state.balances[transfer.sender] + pre_transfer_recipient_balance = state.balances[transfer.recipient] + pre_transfer_proposer_balance = state.balances[proposer_index] + sender_balance = post_state.balances[transfer.sender] + recipient_balance = post_state.balances[transfer.recipient] + assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee + assert recipient_balance == pre_transfer_recipient_balance + transfer.amount + assert post_state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee + + return state, post_state + + +def test_success_non_activated(state): + transfer = get_valid_transfer(state) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + pre_state, post_state = run_transfer_processing(state, transfer) + + return pre_state, transfer, post_state + + +def test_success_withdrawable(state): + next_epoch(state) + + transfer = get_valid_transfer(state) + + # withdrawable_epoch in past so can transfer + state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1 + + pre_state, post_state = run_transfer_processing(state, transfer) + + return pre_state, transfer, post_state + + +def test_success_active_above_max_effective(state): + sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] + amount = spec.MAX_EFFECTIVE_BALANCE // 32 + set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE + amount) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0) + + pre_state, post_state = run_transfer_processing(state, transfer) + + return pre_state, transfer, post_state + + +def test_active_but_transfer_past_effective_balance(state): + sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] + amount = spec.MAX_EFFECTIVE_BALANCE // 32 + set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0) + + pre_state, post_state = run_transfer_processing(state, transfer, False) + + return pre_state, transfer, post_state + + +def test_incorrect_slot(state): + transfer = get_valid_transfer(state, slot=state.slot+1) + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + pre_state, post_state = run_transfer_processing(state, transfer, False) + + return pre_state, transfer, post_state + + +def test_insufficient_balance(state): + sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] + amount = spec.MAX_EFFECTIVE_BALANCE + set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE) + transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount + 1, fee=0) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + pre_state, post_state = run_transfer_processing(state, transfer, False) + + return pre_state, transfer, post_state + + +def test_no_dust(state): + sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] + balance = 
state.balances[sender_index] + transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0) + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + pre_state, post_state = run_transfer_processing(state, transfer, False) + + return pre_state, transfer, post_state + + +def test_invalid_pubkey(state): + transfer = get_valid_transfer(state) + state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH + + # un-activate so validator can transfer + state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + + pre_state, post_state = run_transfer_processing(state, transfer, False) + + return pre_state, transfer, post_state \ No newline at end of file diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 9ef891219..650790b5a 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -21,11 +21,13 @@ from eth2spec.phase0.spec import ( DepositData, Eth1Data, ProposerSlashing, + Transfer, VoluntaryExit, # functions convert_to_indexed, get_active_validator_indices, get_attestation_participants, + get_balance, get_block_root, get_crosslink_committee_for_attestation, get_current_epoch, @@ -291,6 +293,48 @@ def get_valid_attestation(state, slot=None): return attestation +def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None): + if slot is None: + slot = state.slot + current_epoch = get_current_epoch(state) + if sender_index is None: + sender_index = get_active_validator_indices(state, current_epoch)[-1] + recipient_index = get_active_validator_indices(state, current_epoch)[0] + transfer_pubkey = pubkeys[-1] + transfer_privkey = privkeys[-1] + + if fee is None: + fee = get_balance(state, sender_index) // 32 + if amount is None: + amount = get_balance(state, sender_index) - fee + + transfer = Transfer( + sender=sender_index, + recipient=recipient_index, + amount=amount, + fee=fee, + slot=slot, + pubkey=transfer_pubkey, + signature=EMPTY_SIGNATURE, + ) + transfer.signature = bls.sign( + message_hash=signing_root(transfer), + privkey=transfer_privkey, + domain=get_domain( + fork=state.fork, + epoch=get_current_epoch(state), + domain_type=spec.DOMAIN_TRANSFER, + ) + ) + + # ensure withdrawal_credentials are reproducible + state.validator_registry[transfer.sender].withdrawal_credentials = ( + spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:] + ) + + return transfer + + def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0): message_hash = AttestationDataAndCustodyBit( data=attestation_data, @@ -311,3 +355,9 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) def next_slot(state): block = build_empty_block_for_next_slot(state) state_transition(state, block) + + +def next_epoch(state): + block = build_empty_block_for_next_slot(state) + block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + state_transition(state, block) From 9c14900c772774d72ec03aa79d894d2954ae1cef Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 10:45:22 +1000 Subject: [PATCH 367/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 86a017508..d58a8b316 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2260,8 +2260,6 @@
def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: ##### Transfers -Note: Transfers are a temporary functionality for phases 0 and 1, to be removed in phase 2. - Verify that `len(block.body.transfers) <= MAX_TRANSFERS` and that all transfers are distinct. For each `transfer` in `block.body.transfers`, run the following function: From 0f8b1c5f3a768989853f3a8928745ca25ac11f00 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 10:56:15 +1000 Subject: [PATCH 368/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d7f7095e4..05dca84eb 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1817,11 +1817,11 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): earliest_attestation = get_earliest_attestation(state, state.previous_epoch_attestations, index) inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot - penalties[index] += base_reward * (1 - MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay) + penalties[index] += base_reward * (inclusion_delay - MIN_ATTESTATION_INCLUSION_DELAY) // (SLOTS_PER_EPOCH - MIN_ATTESTATION_INCLUSION_DELAY) # Inactivity penalty epochs_since_finality = previous_epoch + 1 - state.finalized_epoch - if epochs_since_finality > MAX_FINALITY_LOOKBACK: + if epochs_since_finality >= MIN_EPOCHS_TO_LEAK: penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward if index not in get_unslashed_attesting_indices(state, get_previous_epoch_matching_target_attestations(state)): penalties[index] += get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT From 0a1517c9de4ec91a93fbd11015423ffb39d97a82 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 18 Apr 2019 08:56:46 +0800 Subject: [PATCH 369/481] Update specs/light_client/merkle_proofs.md Co-Authored-By: hwwhww --- specs/light_client/merkle_proofs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 46fa23f82..63c018f2f 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -144,7 +144,7 @@ Generating a proof is simply a matter of taking the node of the SSZ hash tree wi Here is the verification function: ```python -def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[bytes]) -> bool: +def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[Bytes32]) -> bool: tree = {} for index, leaf in zip(indices, leaves): tree[index] = leaf From 3f9a65f1c8942c5c41dfc3ba00952465bafc6797 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 10:59:15 +1000 Subject: [PATCH 370/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 05dca84eb..482e4da49 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -235,7 +235,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | | `MAX_CROSSLINK_EPOCHS` | 
`2**6` (= 64) | epochs | ~7 hours | -| `MAX_FINALITY_LOOKBACK` | `2**2` (= 4) | epochs | 25.6 minutes | +| `MIN_EPOCHS_TO_LEAK` | `2**2` (= 4) | epochs | 25.6 minutes | * `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` @@ -1820,8 +1820,8 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ penalties[index] += base_reward * (inclusion_delay - MIN_ATTESTATION_INCLUSION_DELAY) // (SLOTS_PER_EPOCH - MIN_ATTESTATION_INCLUSION_DELAY) # Inactivity penalty - epochs_since_finality = previous_epoch + 1 - state.finalized_epoch - if epochs_since_finality >= MIN_EPOCHS_TO_LEAK: + epochs_since_finality = previous_epoch - state.finalized_epoch + if epochs_since_finality > MIN_EPOCHS_TO_LEAK: penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward if index not in get_unslashed_attesting_indices(state, get_previous_epoch_matching_target_attestations(state)): penalties[index] += get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT From 71e1a598d283d94242b579e931c57ed58e52fe73 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 11:09:30 +1000 Subject: [PATCH 371/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b98d53154..e0556a7f3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1575,16 +1575,15 @@ At every `slot > GENESIS_SLOT` run the following function: ```python def cache_state(state: BeaconState) -> None: - previous_slot_state_root = hash_tree_root(state) + # Cache latest known state root (for previous slot) + latest_state_root = hash_tree_root(state) + state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_state_root - # store the previous slot's post state transition root - state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_slot_state_root - - # cache state root in stored latest_block_header if empty + # Store latest known state root (for previous slot) in latest_block_header if it is empty if state.latest_block_header.state_root == ZERO_HASH: - state.latest_block_header.state_root = previous_slot_state_root + state.latest_block_header.state_root = latest_state_root - # store latest known block for previous slot + # Cache latest known block root (for previous slot) latest_block_root = signing_root(state.latest_block_header) state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_block_root ``` From 91921d8e86cb74de763be1079806d740a0b8a8a1 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 11:52:14 +1000 Subject: [PATCH 372/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 482e4da49..864411c9f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -235,7 +235,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | | `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours | -| `MIN_EPOCHS_TO_LEAK` | `2**2` (= 4) | epochs | 25.6 minutes | +| `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `2**2` (= 4) | epochs | 25.6 minutes | * `MAX_CROSSLINK_EPOCHS` should be a small constant times 
`SHARD_COUNT // SLOTS_PER_EPOCH` @@ -1821,7 +1821,7 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ # Inactivity penalty epochs_since_finality = previous_epoch - state.finalized_epoch - if epochs_since_finality > MIN_EPOCHS_TO_LEAK: + if epochs_since_finality > MIN_EPOCHS_TO_INACTIVITY_PENALTY: penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward if index not in get_unslashed_attesting_indices(state, get_previous_epoch_matching_target_attestations(state)): penalties[index] += get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT From 443786017fb76f4ab9b86e67b7f5666c893c251a Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 13:31:19 +1000 Subject: [PATCH 373/481] Further decouple justification and finalisation (#958) Addresses @djrtwo's comment [here](https://github.com/ethereum/eth2.0-specs/pull/925#issuecomment-484123950). --- specs/core/0_beacon-chain.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e0556a7f3..1ee9108ad 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1683,7 +1683,8 @@ def update_justification_and_finalization(state: BeaconState) -> None: if get_current_epoch(state) <= GENESIS_EPOCH + 1: return - antepenultimate_justified_epoch = state.previous_justified_epoch + old_previous_justified_epoch = state.previous_justified_epoch + old_current_justified_epoch = state.current_justified_epoch # Process justifications state.previous_justified_epoch = state.current_justified_epoch @@ -1704,20 +1705,20 @@ def update_justification_and_finalization(state: BeaconState) -> None: bitfield = state.justification_bitfield current_epoch = get_current_epoch(state) # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source - if (bitfield >> 1) % 8 == 0b111 and antepenultimate_justified_epoch == current_epoch - 3: - state.finalized_epoch = antepenultimate_justified_epoch + if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3: + state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source - if (bitfield >> 1) % 4 == 0b11 and antepenultimate_justified_epoch == current_epoch - 2: - state.finalized_epoch = antepenultimate_justified_epoch + if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2: + state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source - if (bitfield >> 0) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 2: - state.finalized_epoch = state.previous_justified_epoch + if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2: + state.finalized_epoch = old_current_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source - if (bitfield >> 0) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 1: - state.finalized_epoch = state.previous_justified_epoch + if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch == current_epoch - 1: + state.finalized_epoch = 
old_current_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) ``` From fbaf771b1a184f550967254b6c5594cdff9e186a Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 14:20:34 +1000 Subject: [PATCH 374/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 109 ++++++++++++++++------------------- 1 file changed, 50 insertions(+), 59 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index cdf34ae27..492caf511 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -77,7 +77,7 @@ - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`verify_merkle_branch`](#verify_merkle_branch) - [`get_crosslink_committee_for_attestation`](#get_crosslink_committee_for_attestation) - - [`get_attestation_participants`](#get_attestation_participants) + - [`get_attesting_indices`](#get_attesting_indices) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) - [`get_effective_balance`](#get_effective_balance) @@ -180,7 +180,7 @@ These configurations are updated for releases, but may be out of sync during `de | - | - | | `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | -| `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | +| `MAX_INDICES_PER_ATTESTATION` | `2**12` (= 4,096) | | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | | `SHUFFLE_ROUND_COUNT` | 90 | @@ -303,10 +303,10 @@ The types are defined topologically to aid in facilitating an executable version { # Epoch number 'epoch': 'uint64', - # Shard data since the previous crosslink - 'crosslink_data_root': 'bytes32', # Root of the previous crosslink 'previous_crosslink_root': 'bytes32', + # Shard data since the previous crosslink + 'crosslink_data_root': 'bytes32', } ``` @@ -1023,14 +1023,14 @@ def get_crosslink_committee_for_attestation(state: BeaconState, return crosslink_committee ``` -### `get_attestation_participants` +### `get_attesting_indices` ```python -def get_attestation_participants(state: BeaconState, +def get_attesting_indices(state: BeaconState, attestation_data: AttestationData, bitfield: bytes) -> List[ValidatorIndex]: """ - Return the sorted participant indices corresponding to ``attestation_data`` and ``bitfield``. + Return the sorted attesting indices corresponding to ``attestation_data`` and ``bitfield``. """ crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) assert verify_bitfield(bitfield, len(crosslink_committee)) @@ -1129,8 +1129,8 @@ def convert_to_indexed(state: BeaconState, attestation: Attestation) -> IndexedA """ Convert ``attestation`` to (almost) indexed-verifiable form. 
""" - attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) + attesting_indices = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) + custody_bit_1_indices = get_attesting_indices(state, attestation.data, attestation.custody_bitfield) custody_bit_0_indices = [index for index in attesting_indices if index not in custody_bit_1_indices] return IndexedAttestation( @@ -1151,14 +1151,13 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA custody_bit_0_indices = indexed_attestation.custody_bit_0_indices custody_bit_1_indices = indexed_attestation.custody_bit_1_indices - # ensure no duplicate indices across custody bits + # Ensure no duplicate indices across custody bits assert len(set(custody_bit_0_indices).intersection(set(custody_bit_1_indices))) == 0 if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1] return False - total_attesting_indices = len(custody_bit_0_indices) + len(custody_bit_1_indices) - if not (1 <= total_attesting_indices <= MAX_ATTESTATION_PARTICIPANTS): + if not (1 <= len(custody_bit_0_indices) + len(custody_bit_1_indices) <= MAX_INDICES_PER_ATTESTATION): return False if custody_bit_0_indices != sorted(custody_bit_0_indices): @@ -1615,7 +1614,7 @@ def get_previous_total_balance(state: BeaconState) -> Gwei: def get_unslashed_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: output = set() for a in attestations: - output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) + output = output.union(get_attesting_indices(state, a.data, a.aggregation_bitfield)) return sorted(filter(lambda index: not state.validator_registry[index].slashed, list(output))) ``` @@ -1651,34 +1650,31 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe **Note**: Total balances computed for the previous epoch might be marginally different than the actual total balances during the previous epoch transition. Due to the tight bound on validator churn each epoch and small per-epoch rewards/penalties, the potential balance difference is very low and only marginally affects consensus safety. 
```python -def get_winning_root_and_participants(state: BeaconState, slot: Slot, shard: Shard) -> Tuple[Bytes32, Bytes32, List[ValidatorIndex]]: - attestations = state.current_epoch_attestations if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_epoch_attestations - - valid_attestations = [a for a in attestations if a.data.shard == shard] - all_roots = [(a.data.crosslink_data_root, a.data.previous_crosslink_root) for a in valid_attestations] - - # handle when no attestations for shard available - if len(all_roots) == 0: - return ZERO_HASH, ZERO_HASH, [] +def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: + pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations + crosslink_data_roots = [(a.data.crosslink_data_root, a.data.previous_crosslink_root) for a in pending_attestations if a.data.shard == shard] def get_attestations_for(root) -> List[PendingAttestation]: - return [a for a in valid_attestations if a.data.crosslink_data_root == root] + return [a for a in valid_attestations if a.data.shard == shard and a.data.crosslink_data_root == root] - # Winning crosslink root is the root with the most votes for it, ties broken in favor of - # lexicographically higher hash - winning_root, previous_crosslink_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r[0])), r[0])) - - return ( - winning_root, - previous_crosslink_root, - get_unslashed_attesting_indices(state, get_attestations_for(winning_root)), + # Winning crosslink data root is the root with the most votes for it (ties broken lexicographically) + crosslink_data_root, previous_crosslink_root = max(crosslink_data_roots, + key=lambda r: (get_attesting_balance(state, get_attestations_for(r[0])), r[0]), + default=ZERO_HASH, ZERO_HASH ) + winning_crosslink = Crosslink( + epoch=min(epoch, state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), + crosslink_data_root=crosslink_data_root, + previous_crosslink_root=previous_crosslink_root, + ) + + return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(crosslink_data_root)) ``` ```python def get_earliest_attestation(state: BeaconState, attestations: List[PendingAttestation], index: ValidatorIndex) -> PendingAttestation: return min([ - a for a in attestations if index in get_attestation_participants(state, a.data, a.aggregation_bitfield) + a for a in attestations if index in get_attesting_indices(state, a.data, a.aggregation_bitfield) ], key=lambda a: a.inclusion_slot) ``` @@ -1735,22 +1731,18 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: - state.previous_crosslinks = [crosslink for crosslink in state.current_crosslinks] + state.previous_crosslinks = state.current_crosslinks - previous_epoch = get_previous_epoch(state) - next_epoch = get_current_epoch(state) + 1 - for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): + for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state) + 1)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, previous_crosslink_root, participants = get_winning_root_and_participants(state, slot, shard) - expected_crosslink_root = hash_tree_root(state.current_crosslinks[shard]) - participating_balance = get_total_balance(state, participants) - 
total_balance = get_total_balance(state, crosslink_committee) - if previous_crosslink_root == expected_crosslink_root and 3 * participating_balance >= 2 * total_balance: - state.current_crosslinks[shard] = Crosslink( - epoch=min(slot_to_epoch(slot), state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), - crosslink_data_root=winning_root, - previous_crosslink_root=previous_crosslink_root, - ) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot, shard) + attesting_balance = get_total_balance(state, attesting_indices) + committee_balance = get_total_balance(state, crosslink_committee) + if ( + winning_crosslink.previous_crosslink_root == hash_tree_root(state.current_crosslinks[shard]) and + 3 * attesting_balance >= 2 * committee_balance + ): + state.current_crosslinks[shard] = winning_crosslink ``` #### Rewards and penalties @@ -1838,23 +1830,22 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] - previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) - current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - for slot in range(previous_epoch_start_slot, current_epoch_start_slot): + for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state))): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, previous_crosslink_root, participants = get_winning_root_and_participants(state, slot, shard) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot, shard) - # do not count as success if winning_root did not or cannot form a chain - attempted_crosslink = Crosslink(epoch=slot_to_epoch(slot), crosslink_data_root=winning_root, previous_crosslink_root=previous_crosslink_root) - actual_crosslink_root = hash_tree_root(state.previous_crosslinks[shard]) - if not actual_crosslink_root in {previous_crosslink_root, hash_tree_root(attempted_crosslink)}: - participants = [] + # Do not count as success if winning_crosslink did not or cannot form a chain + if not hash_tree_root(state.previous_crosslinks[shard]) in ( + winning_crosslink.previous_crosslink_root, + hash_tree_root(winning_crosslink) + ): + attesting_indices = [] - participating_balance = get_total_balance(state, participants) - total_balance = get_total_balance(state, crosslink_committee) + attesting_balance = get_total_balance(state, attesting_indices) + committee_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: - if index in participants: - rewards[index] += get_base_reward(state, index) * participating_balance // total_balance + if index in attesting_indices: + rewards[index] += get_base_reward(state, index) * attesting_balance // committee_balance else: penalties[index] += get_base_reward(state, index) return [rewards, penalties] From 9ecafb2a1cc010047440f9e6aa56cdf36bc59dac Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 14:33:45 +1000 Subject: [PATCH 375/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 492caf511..139b8024b 100644 --- 
a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1652,23 +1652,23 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe ```python def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations - crosslink_data_roots = [(a.data.crosslink_data_root, a.data.previous_crosslink_root) for a in pending_attestations if a.data.shard == shard] - - def get_attestations_for(root) -> List[PendingAttestation]: - return [a for a in valid_attestations if a.data.shard == shard and a.data.crosslink_data_root == root] - - # Winning crosslink data root is the root with the most votes for it (ties broken lexicographically) - crosslink_data_root, previous_crosslink_root = max(crosslink_data_roots, - key=lambda r: (get_attesting_balance(state, get_attestations_for(r[0])), r[0]), - default=ZERO_HASH, ZERO_HASH - ) - winning_crosslink = Crosslink( + candidate_crosslinks = [Crosslink( epoch=min(epoch, state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), - crosslink_data_root=crosslink_data_root, - previous_crosslink_root=previous_crosslink_root, - ) + crosslink_data_root=a.data.crosslink_data_root, + previous_crosslink_root=a.data.previous_crosslink_root, + ) for a in pending_attestations if a.data.shard == shard] - return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(crosslink_data_root)) + if len(candidate_crosslinks) == 0: + return Crosslink(GENESIS_EPOCH, ZERO_HASH, ZERO_HASH), [] + + def get_attestations_for(crosslink_data_root) -> List[PendingAttestation]: + return [a for a in pending_attestations if a.data.shard == shard and a.data.crosslink_data_root == crosslink_data_root] + + # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) + winning_crosslink = max(candidate_crosslinks, key=lambda c: ( + get_attesting_balance(state, get_attestations_for(c.crosslink_data_root)), c.crosslink_data_root + )) + return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink.crosslink_data_root)) ``` ```python @@ -1735,7 +1735,7 @@ def process_crosslinks(state: BeaconState) -> None: for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state) + 1)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot, shard) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) attesting_balance = get_total_balance(state, attesting_indices) committee_balance = get_total_balance(state, crosslink_committee) if ( @@ -1832,7 +1832,7 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: penalties = [0 for index in range(len(state.validator_registry))] for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state))): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot, shard) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) # Do not count 
as success if winning_crosslink did not or cannot form a chain if not hash_tree_root(state.previous_crosslinks[shard]) in ( From 40b55cf433998f5af3f7a3c43edd1f682fa63876 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Thu, 18 Apr 2019 14:43:24 +1000 Subject: [PATCH 376/481] More fixes --- specs/core/0_beacon-chain.md | 10 +++++----- specs/core/1_custody-game.md | 6 +++--- specs/validator/0_beacon-chain-validator.md | 2 +- test_libs/pyspec/tests/helpers.py | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 139b8024b..ac73c08aa 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -577,7 +577,7 @@ The types are defined topologically to aid in facilitating an executable version # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], 'latest_start_shard': 'uint64', - + # Finality 'previous_epoch_attestations': [PendingAttestation], 'current_epoch_attestations': [PendingAttestation], @@ -651,7 +651,7 @@ Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethere ```python def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: """ - Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. + Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. """ return BeaconBlockHeader( slot=block.slot, @@ -796,7 +796,7 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: """ assert index < list_size assert list_size <= 2**40 - + for round in range(SHUFFLE_ROUND_COUNT): pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size flip = (pivot - index) % list_size @@ -1659,7 +1659,7 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch ) for a in pending_attestations if a.data.shard == shard] if len(candidate_crosslinks) == 0: - return Crosslink(GENESIS_EPOCH, ZERO_HASH, ZERO_HASH), [] + return Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH), [] def get_attestations_for(crosslink_data_root) -> List[PendingAttestation]: return [a for a in pending_attestations if a.data.shard == shard and a.data.crosslink_data_root == crosslink_data_root] @@ -1899,7 +1899,7 @@ def update_registry(state: BeaconState) -> None: ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) for index in activation_queue[:get_churn_limit(state)]: - activate_validator(state, index, is_genesis=False) + activate_validator(state, index, is_genesis=False) state.latest_start_shard = ( state.latest_start_shard + diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 138e69fee..74b086219 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -309,7 +309,7 @@ def process_chunk_challenge(state: BeaconState, responder = state.validator_registry[challenge.responder_index] assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY # Verify the responder participated in the attestation - attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) assert challenge.responder_index in attesters # Verify the challenge is not a duplicate for record in state.custody_chunk_challenge_records: @@ -359,9 +359,9 @@ def process_bit_challenge(state: BeaconState, # 
Verify the attestation is eligible for challenging responder = state.validator_registry[challenge.responder_index] min_challengeable_epoch = responder.exit_epoch - EPOCHS_PER_CUSTODY_PERIOD * (1 + responder.max_reveal_lateness) - assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot) + assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot) # Verify the responder participated in the attestation - attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) assert challenge.responder_index in attesters # A validator can be the challenger or responder for at most one challenge at a time for record in state.custody_bit_challenge_records: diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 53712880c..632bf2b62 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -299,7 +299,7 @@ Set `attestation.data = attestation_data` where `attestation_data` is the `Attes * Set `aggregation_bitfield[index_into_committee // 8] |= 2 ** (index_into_committee % 8)`. * Set `attestation.aggregation_bitfield = aggregation_bitfield`. -_Note_: Calling `get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`. +_Note_: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`. ##### Custody bitfield diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index fca9d3bd0..384490d83 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -25,7 +25,7 @@ from eth2spec.phase0.spec import ( # functions convert_to_indexed, get_active_validator_indices, - get_attestation_participants, + get_attesting_indices, get_block_root, get_crosslink_committee_for_attestation, get_current_epoch, @@ -300,7 +300,7 @@ def get_valid_attestation(state, slot=None): custody_bitfield=custody_bitfield, aggregate_signature=EMPTY_SIGNATURE, ) - participants = get_attestation_participants( + participants = get_attesting_indices( state, attestation.data, attestation.aggregation_bitfield, From 4bca7f984d9e86ef21a54875e8fcd466ab9c348a Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 18 Apr 2019 00:43:22 -0700 Subject: [PATCH 377/481] Add the prefix `state` when accessing the latest deposit count (#961) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1ee9108ad..e6f9872c9 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2155,7 +2155,7 @@ def process_proposer_attestation_rewards(state: BeaconState) -> None: ##### Deposits -Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. +Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)`. 
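Expressed as code, the corrected deposit-count check reads as the following sketch (the helper name `verify_block_deposit_count` is illustrative and not part of the spec):

```python
def verify_block_deposit_count(state: BeaconState, block: BeaconBlock) -> None:
    # A block must include every deposit pending in the state's eth1 data,
    # capped at MAX_DEPOSITS per block.
    expected_count = min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)
    assert len(block.body.deposits) == expected_count
```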
For each `deposit` in `block.body.deposits`, run the following function: From d9afb67e29bd9558d2e7d2aff1d110b8d057775a Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 17:45:28 +1000 Subject: [PATCH 378/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d58a8b316..bab26ff77 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1366,7 +1366,7 @@ For convenience, we provide the interface to the contract here: * `__init__()`: initializes the contract * `get_deposit_root() -> bytes32`: returns the current root of the deposit tree -* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be greater than `MIN_DEPOSIT_AMOUNT` inclusive. Each of these constants are specified in units of Gwei. +* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be at least `MIN_DEPOSIT_AMOUNT`. Each of these constants are specified in units of Gwei. ## On genesis From 40a898f1253960e7d42c21e9163343e5b14570dd Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 17:46:31 +1000 Subject: [PATCH 379/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index bab26ff77..6a01cf029 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1344,7 +1344,7 @@ The private key corresponding to `withdrawal_pubkey` will be required to initiat ### `Deposit` logs -Every Ethereum 1.0 deposit, of size greater than `MIN_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract. +Every Ethereum 1.0 deposit, of size at least `MIN_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12-381 signature) is not verified by the deposit contract. 
### `Eth2Genesis` log From 72f4e2d3b613dacf6b0ef7e29268906156d1bbba Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 17:51:50 +1000 Subject: [PATCH 380/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6a01cf029..4f4158e42 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -2274,10 +2274,10 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) # A transfer is valid in only one slot assert state.slot == transfer.slot - # Only withdrawn, not-yet-deposited accounts, or the balance over MAX_EFFECTIVE_BALANCE can be transfered + # Sender must be not yet eligible for activation, withdrawn, or transfer balance over MAX_EFFECTIVE_BALANCE assert ( + state.validator_registry[transfer.sender].activation_eligibility_epoch == FAR_FUTURE_EPOCH or get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or - state.validator_registry[transfer.sender].activation_epoch == FAR_FUTURE_EPOCH or transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= get_balance(state, transfer.sender) ) # Verify that the pubkey is valid From cae5c227188e4340550de625fd448688b3ce9b68 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Thu, 18 Apr 2019 18:11:18 +1000 Subject: [PATCH 381/481] Simplify get_crosslink_committee_for_attestation and move to test helpers --- specs/core/0_beacon-chain.md | 26 ++++---------------------- test_libs/pyspec/tests/helpers.py | 9 +++++++++ 2 files changed, 13 insertions(+), 22 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ac73c08aa..b0cfb900c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -76,7 +76,6 @@ - [`generate_seed`](#generate_seed) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`verify_merkle_branch`](#verify_merkle_branch) - - [`get_crosslink_committee_for_attestation`](#get_crosslink_committee_for_attestation) - [`get_attesting_indices`](#get_attesting_indices) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) @@ -1005,34 +1004,17 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: return value == root ``` -### `get_crosslink_committee_for_attestation` - -```python -def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: - """ - Return the crosslink committee corresponding to ``attestation_data``. - """ - # Find the committee in the list with the desired shard - crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - - # Find the committee in the list with the desired shard - assert attestation_data.shard in [shard for _, shard in crosslink_committees] - crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] - - return crosslink_committee -``` - ### `get_attesting_indices` ```python def get_attesting_indices(state: BeaconState, - attestation_data: AttestationData, - bitfield: bytes) -> List[ValidatorIndex]: + attestation_data: AttestationData, + bitfield: bytes) -> List[ValidatorIndex]: """ Return the sorted attesting indices corresponding to ``attestation_data`` and ``bitfield``. 
""" - crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) + crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) + crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] assert verify_bitfield(bitfield, len(crosslink_committee)) return sorted([index for i, index in enumerate(crosslink_committee) if get_bitfield_bit(bitfield, i) == 0b1]) ``` diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 384490d83..214e93f75 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -276,6 +276,15 @@ def get_valid_attester_slashing(state): ) +def get_crosslink_committee_for_attestation(state: BeaconState, + attestation_data: AttestationData) -> List[ValidatorIndex]: + """ + Return the crosslink committee corresponding to ``attestation_data``. + """ + crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) + return [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + + def get_valid_attestation(state, slot=None): if slot is None: slot = state.slot From 964b4d380dd29f17797886465309cd4a7873ae27 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 18 Apr 2019 16:21:53 +0800 Subject: [PATCH 382/481] Fix `pyspec/tests/helpers.py` --- test_libs/pyspec/tests/helpers.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 214e93f75..44d2dcb4d 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -27,7 +27,7 @@ from eth2spec.phase0.spec import ( get_active_validator_indices, get_attesting_indices, get_block_root, - get_crosslink_committee_for_attestation, + get_crosslink_committees_at_slot, get_current_epoch, get_domain, get_empty_block, @@ -276,8 +276,7 @@ def get_valid_attester_slashing(state): ) -def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: +def get_crosslink_committee_for_attestation(state, attestation_data): """ Return the crosslink committee corresponding to ``attestation_data``. 
""" From 743193aa7d8f8aad717d85b6c420f8bd98566c4f Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 18 Apr 2019 16:53:02 +0800 Subject: [PATCH 383/481] nitpicks --- specs/core/0_beacon-chain.md | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b0cfb900c..ece6c1927 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -126,7 +126,7 @@ - [Per-block processing](#per-block-processing) - [Block header](#block-header) - [RANDAO](#randao) - - [Eth1 data](#eth1-data-1) + - [Eth1 data](#eth1-data) - [Operations](#operations) - [Proposer slashings](#proposer-slashings) - [Attester slashings](#attester-slashings) @@ -1429,8 +1429,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], finalized_root=ZERO_HASH, # Recent state - current_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), - previous_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + current_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + previous_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), @@ -1634,14 +1634,17 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe ```python def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations - candidate_crosslinks = [Crosslink( - epoch=min(epoch, state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), - crosslink_data_root=a.data.crosslink_data_root, - previous_crosslink_root=a.data.previous_crosslink_root, - ) for a in pending_attestations if a.data.shard == shard] + candidate_crosslinks = [ + Crosslink( + epoch=min(epoch, state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), + previous_crosslink_root=a.data.previous_crosslink_root, + crosslink_data_root=a.data.crosslink_data_root, + ) + for a in pending_attestations if a.data.shard == shard + ] if len(candidate_crosslinks) == 0: - return Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH), [] + return Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH), [] def get_attestations_for(crosslink_data_root) -> List[PendingAttestation]: return [a for a in pending_attestations if a.data.shard == shard and a.data.crosslink_data_root == crosslink_data_root] @@ -1714,8 +1717,9 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: state.previous_crosslinks = state.current_crosslinks - - for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state) + 1)): + previous_epoch = get_previous_epoch(state) + next_epoch = 
get_current_epoch(state) + 1 + for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) attesting_balance = get_total_balance(state, attesting_indices) @@ -1817,7 +1821,7 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) # Do not count as success if winning_crosslink did not or cannot form a chain - if not hash_tree_root(state.previous_crosslinks[shard]) in ( + if hash_tree_root(state.previous_crosslinks[shard]) not in ( winning_crosslink.previous_crosslink_root, hash_tree_root(winning_crosslink) ): @@ -1881,7 +1885,7 @@ def update_registry(state: BeaconState) -> None: ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) for index in activation_queue[:get_churn_limit(state)]: - activate_validator(state, index, is_genesis=False) + activate_validator(state, index) state.latest_start_shard = ( state.latest_start_shard + From 4244db92ad0a8f338004d91803ea110dc4f67697 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Thu, 18 Apr 2019 18:53:22 +1000 Subject: [PATCH 384/481] More cleanups --- specs/core/0_beacon-chain.md | 34 ++++++++++++------------------- test_libs/pyspec/tests/helpers.py | 5 ++--- 2 files changed, 15 insertions(+), 24 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b0cfb900c..b95d9acac 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1634,22 +1634,28 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe ```python def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations - candidate_crosslinks = [Crosslink( + shard_attestations = [a for a in pending_attestations if a.data.shard == shard] + shard_crosslinks = [Crosslink( epoch=min(epoch, state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), crosslink_data_root=a.data.crosslink_data_root, previous_crosslink_root=a.data.previous_crosslink_root, - ) for a in pending_attestations if a.data.shard == shard] + ) for a in shard_attestations] + candidate_crosslinks = [c for c in shard_crosslinks if + hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c)) + ] if len(candidate_crosslinks) == 0: return Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH), [] def get_attestations_for(crosslink_data_root) -> List[PendingAttestation]: - return [a for a in pending_attestations if a.data.shard == shard and a.data.crosslink_data_root == crosslink_data_root] + return [a for a in shard_attestations if a.data.crosslink_data_root == crosslink_data_root] # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) - winning_crosslink = max(candidate_crosslinks, key=lambda c: ( - get_attesting_balance(state, get_attestations_for(c.crosslink_data_root)), c.crosslink_data_root - )) + winning_crosslink = max(candidate_crosslinks, + key=lambda c: (get_attesting_balance(state, 
get_attestations_for(c.crosslink_data_root)), c.crosslink_data_root), + default=Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH), + ) + return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink.crosslink_data_root)) ``` @@ -1714,16 +1720,10 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: state.previous_crosslinks = state.current_crosslinks - for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state) + 1)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) - attesting_balance = get_total_balance(state, attesting_indices) - committee_balance = get_total_balance(state, crosslink_committee) - if ( - winning_crosslink.previous_crosslink_root == hash_tree_root(state.current_crosslinks[shard]) and - 3 * attesting_balance >= 2 * committee_balance - ): + if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee): state.current_crosslinks[shard] = winning_crosslink ``` @@ -1815,14 +1815,6 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state))): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) - - # Do not count as success if winning_crosslink did not or cannot form a chain - if not hash_tree_root(state.previous_crosslinks[shard]) in ( - winning_crosslink.previous_crosslink_root, - hash_tree_root(winning_crosslink) - ): - attesting_indices = [] - attesting_balance = get_total_balance(state, attesting_indices) committee_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 214e93f75..563786c7c 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -27,7 +27,6 @@ from eth2spec.phase0.spec import ( get_active_validator_indices, get_attesting_indices, get_block_root, - get_crosslink_committee_for_attestation, get_current_epoch, get_domain, get_empty_block, @@ -35,6 +34,7 @@ from eth2spec.phase0.spec import ( get_genesis_beacon_state, get_previous_epoch, get_shard_delta, + get_crosslink_committees_at_slot, hash_tree_root, slot_to_epoch, verify_merkle_branch, @@ -276,8 +276,7 @@ def get_valid_attester_slashing(state): ) -def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: +def get_crosslink_committee_for_attestation(state, attestation_data): """ Return the crosslink committee corresponding to ``attestation_data``. 
""" From d4ce0d20a0ee89d6df411d4b7cd8ab0d7dd3d6ef Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 18 Apr 2019 19:08:34 +1000 Subject: [PATCH 385/481] Cleanup per-epoch processing presentation (#959) --- specs/core/0_beacon-chain.md | 71 ++++++------------- .../eth2spec/phase0/state_transition.py | 9 ++- 2 files changed, 25 insertions(+), 55 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e6f9872c9..1ccc0823e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -113,21 +113,17 @@ - [State caching](#state-caching) - [Per-epoch processing](#per-epoch-processing) - [Helper functions](#helper-functions-1) - - [Justification](#justification) + - [Justification and finalization](#justification-and-finalization) - [Crosslinks](#crosslinks) - [Rewards and penalties](#rewards-and-penalties) - - [Justification and finalization](#justification-and-finalization) - - [Crosslinks](#crosslinks-1) - - [Apply rewards](#apply-rewards) - - [Balance-driven status transitions](#balance-driven-status-transitions) - - [Activation queue and start shard](#activation-queue-and-start-shard) + - [Registry updates](#registry-updates) - [Slashings](#slashings) - [Final updates](#final-updates) - [Per-slot processing](#per-slot-processing) - [Per-block processing](#per-block-processing) - [Block header](#block-header) - [RANDAO](#randao) - - [Eth1 data](#eth1-data-1) + - [Eth1 data](#eth1-data) - [Operations](#operations) - [Proposer slashings](#proposer-slashings) - [Attester slashings](#attester-slashings) @@ -575,7 +571,7 @@ The types are defined topologically to aid in facilitating an executable version # Randomness and committees 'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH], 'latest_start_shard': 'uint64', - + # Finality 'previous_epoch_attestations': [PendingAttestation], 'current_epoch_attestations': [PendingAttestation], @@ -648,7 +644,7 @@ Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethere ```python def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: """ - Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. + Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. 
""" return BeaconBlockHeader( slot=block.slot, @@ -793,7 +789,7 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: """ assert index < list_size assert list_size <= 2**40 - + for round in range(SHUFFLE_ROUND_COUNT): pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size flip = (pivot - index) % list_size @@ -1674,12 +1670,12 @@ def get_earliest_attestation(state: BeaconState, attestations: List[PendingAttes ], key=lambda a: a.inclusion_slot) ``` -#### Justification +#### Justification and finalization Run the following function: ```python -def update_justification_and_finalization(state: BeaconState) -> None: +def process_justification_and_finalization(state: BeaconState) -> None: if get_current_epoch(state) <= GENESIS_EPOCH + 1: return @@ -1744,7 +1740,7 @@ def process_crosslinks(state: BeaconState) -> None: #### Rewards and penalties -First, we define some additional helpers: +First, we define additional helpers: ```python def get_base_reward_from_total_balance(state: BeaconState, total_balance: Gwei, index: ValidatorIndex) -> Gwei: @@ -1769,10 +1765,6 @@ def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_sin return get_base_reward(state, index) + extra_penalty ``` -Note: When applying penalties in the following balance recalculations, implementers should make sure the `uint64` does not underflow. - -##### Justification and finalization - ```python def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: current_epoch = get_current_epoch(state) @@ -1821,8 +1813,6 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ return [rewards, penalties] ``` -##### Crosslinks - ```python def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: rewards = [0 for index in range(len(state.validator_registry))] @@ -1842,12 +1832,10 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: return [rewards, penalties] ``` -#### Apply rewards - -Run the following: +Run the following function: ```python -def apply_rewards(state: BeaconState) -> None: +def process_rewards_and_penalties(state: BeaconState) -> None: if get_current_epoch(state) == GENESIS_EPOCH: return @@ -1858,16 +1846,13 @@ def apply_rewards(state: BeaconState) -> None: decrease_balance(state, i, penalties1[i] + penalties2[i]) ``` -#### Balance-driven status transitions +#### Registry updates -Run `process_balance_driven_status_transitions(state)`. 
+Run the following function: ```python -def process_balance_driven_status_transitions(state: BeaconState) -> None: - """ - Iterate through the validator registry - and deposit or eject active validators with sufficiently high or low balances - """ +def process_registry_updates(state: BeaconState) -> None: + # Process activation eligibility and ejections for index, validator in enumerate(state.validator_registry): balance = get_balance(state, index) if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: @@ -1875,39 +1860,23 @@ def process_balance_driven_status_transitions(state: BeaconState) -> None: if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: initiate_validator_exit(state, index) -``` -#### Activation queue and start shard - -Run the following function: - -```python -def update_registry(state: BeaconState) -> None: + # Process activations activation_queue = sorted([ index for index, validator in enumerate(state.validator_registry) if validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch) ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) - for index in activation_queue[:get_churn_limit(state)]: - activate_validator(state, index, is_genesis=False) - - state.latest_start_shard = ( - state.latest_start_shard + - get_shard_delta(state, get_current_epoch(state)) - ) % SHARD_COUNT + activate_validator(state, index) ``` #### Slashings -Run `process_slashings(state)`: +Run the following function: ```python def process_slashings(state: BeaconState) -> None: - """ - Process the slashings. - Note that this function mutates ``state``. - """ current_epoch = get_current_epoch(state) active_validator_indices = get_active_validator_indices(state, current_epoch) total_balance = get_total_balance(state, active_validator_indices) @@ -1931,12 +1900,14 @@ def process_slashings(state: BeaconState) -> None: Run the following function: ```python -def finish_epoch_update(state: BeaconState) -> None: +def process_final_updates(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = current_epoch + 1 # Reset eth1 data votes if state.slot % SLOTS_PER_ETH1_VOTING_PERIOD == 0: state.eth1_data_votes = [] + # Update start shard + state.latest_start_shard = (state.latest_start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT # Set active index root index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH state.latest_active_index_roots[index_root_position] = hash_tree_root( diff --git a/test_libs/pyspec/eth2spec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py index f43e2791b..38ecd2a02 100644 --- a/test_libs/pyspec/eth2spec/phase0/state_transition.py +++ b/test_libs/pyspec/eth2spec/phase0/state_transition.py @@ -91,13 +91,12 @@ def process_block(state: BeaconState, def process_epoch_transition(state: BeaconState) -> None: - spec.update_justification_and_finalization(state) + spec.process_justification_and_finalization(state) spec.process_crosslinks(state) - spec.apply_rewards(state) - spec.process_balance_driven_status_transitions(state) - spec.update_registry(state) + spec.process_rewards_and_penalties(state) + spec.process_registry_updates(state) spec.process_slashings(state) - spec.finish_epoch_update(state) + spec.process_final_updates(state) def state_transition_to(state: BeaconState, up_to: 
Slot) -> BeaconState: From 7a016489c15dffc6c7e7ff3dc36a442922ea6abd Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Thu, 18 Apr 2019 19:33:38 +1000 Subject: [PATCH 386/481] Moar --- specs/core/0_beacon-chain.md | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 14a387467..1c00ac659 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1627,33 +1627,34 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe **Note**: Total balances computed for the previous epoch might be marginally different than the actual total balances during the previous epoch transition. Due to the tight bound on validator churn each epoch and small per-epoch rewards/penalties, the potential balance difference is very low and only marginally affects consensus safety. +```python +def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationData) -> Crosslink: + return Crosslink( + epoch=min(slot_to_epoch(data.slot), state.current_crosslinks[data.shard].epoch + MAX_CROSSLINK_EPOCHS), + crosslink_data_root=data.crosslink_data_root, + previous_crosslink_root=data.previous_crosslink_root, + ) +``` + ```python def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations shard_attestations = [a for a in pending_attestations if a.data.shard == shard] - shard_crosslinks = [ - Crosslink( - epoch=min(epoch, state.current_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), - crosslink_data_root=a.data.crosslink_data_root, - previous_crosslink_root=a.data.previous_crosslink_root, - ) for a in shard_attestations - ] + shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations] candidate_crosslinks = [c for c in shard_crosslinks if hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c)) ] if len(candidate_crosslinks) == 0: return Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH), [] - def get_attestations_for(crosslink_data_root) -> List[PendingAttestation]: - return [a for a in shard_attestations if a.data.crosslink_data_root == crosslink_data_root] - + def get_attestations_for(crosslink: Crosslink) -> List[PendingAttestation]: + return [a for a in shard_attestations if get_crosslink_from_attestation_data(state, a.data) == crosslink] # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) - winning_crosslink = max(candidate_crosslinks, - key=lambda c: (get_attesting_balance(state, get_attestations_for(c.crosslink_data_root)), c.crosslink_data_root), - default=Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH, previous_crosslink_root=ZERO_HASH), - ) + winning_crosslink = max(candidate_crosslinks, key=lambda crosslink: ( + get_attesting_balance(state, get_attestations_for(crosslink)), crosslink.crosslink_data_root + )) - return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink.crosslink_data_root)) + return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink)) ``` ```python From cb5c95b84e6e2e8970fe3cf6ce68df277eb82b29 Mon Sep 17 00:00:00 2001 From: Carl 
Beekhuizen Date: Thu, 18 Apr 2019 12:35:22 +0200 Subject: [PATCH 387/481] Fixes tests --- .../pyspec/tests/block_processing/test_process_transfer.py | 2 +- test_libs/pyspec/tests/test_sanity.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test_libs/pyspec/tests/block_processing/test_process_transfer.py b/test_libs/pyspec/tests/block_processing/test_process_transfer.py index df49aff98..71d0894bd 100644 --- a/test_libs/pyspec/tests/block_processing/test_process_transfer.py +++ b/test_libs/pyspec/tests/block_processing/test_process_transfer.py @@ -52,7 +52,7 @@ def run_transfer_processing(state, transfer, valid=True): def test_success_non_activated(state): transfer = get_valid_transfer(state) # un-activate so validator can transfer - state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH + state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH pre_state, post_state = run_transfer_processing(state, transfer) diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index ee4ab405d..2004a2f25 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -442,7 +442,7 @@ def test_transfer(state): spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer_pubkey)[1:] ) # un-activate so validator can transfer - pre_state.validator_registry[sender_index].activation_epoch = spec.FAR_FUTURE_EPOCH + pre_state.validator_registry[sender_index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH post_state = deepcopy(pre_state) # From d8a4a48ed819ee8ea5ab4c95797282b1d9a6cf5f Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Thu, 18 Apr 2019 22:54:17 +1000 Subject: [PATCH 388/481] SSZ impl fixes (#960) * fix serialization mixup of array types, fix variable size vector serialization, document, structure and de-deduplicate code * more cleanup + minor fixes in earlier improvements * Make type-inference stable on empty lists/vectors * Add get_zero_value --- .../pyspec/eth2spec/utils/minimal_ssz.py | 172 +++++++++++++++--- 1 file changed, 143 insertions(+), 29 deletions(-) diff --git a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py index 902ed8460..8f42f1f65 100644 --- a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py +++ b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py @@ -1,5 +1,6 @@ from .hash_function import hash +from typing import Any BYTES_PER_CHUNK = 32 BYTES_PER_LENGTH_PREFIX = 4 @@ -9,15 +10,16 @@ ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK def SSZType(fields): class SSZObject(): def __init__(self, **kwargs): - for f in fields: + for f, t in fields.items(): if f not in kwargs: - raise Exception("Missing constructor argument: %s" % f) - setattr(self, f, kwargs[f]) + setattr(self, f, get_zero_value(t)) + else: + setattr(self, f, kwargs[f]) def __eq__(self, other): return ( - self.fields == other.fields and - self.serialize() == other.serialize() + self.fields == other.fields and + self.serialize() == other.serialize() ) def __hash__(self): @@ -58,18 +60,40 @@ class Vector(): def is_basic(typ): - return isinstance(typ, str) and (typ[:4] in ('uint', 'bool') or typ == 'byte') + # if not a string, it is a complex, and cannot be basic + if not isinstance(typ, str): + return False + # "uintN": N-bit unsigned integer (where N in [8, 16, 32, 64, 128, 256]) + elif typ[:4] == 'uint' and typ[4:] in ['8', '16', '32', '64', '128', '256']: + return True + # "bool": True or False + elif typ == 'bool': + return True + # 
alias: "byte" -> "uint8" + elif typ == 'byte': + return True + # default + else: + return False def is_constant_sized(typ): + # basic objects are fixed size by definition if is_basic(typ): return True + # dynamic size array type, "list": [elem_type]. + # Not constant size by definition. elif isinstance(typ, list) and len(typ) == 1: - return is_constant_sized(typ[0]) - elif isinstance(typ, list) and len(typ) == 2: return False + # fixed size array type, "vector": [elem_type, length] + # Constant size, but only if the elements are. + elif isinstance(typ, list) and len(typ) == 2: + return is_constant_sized(typ[0]) + # bytes array (fixed or dynamic size) elif isinstance(typ, str) and typ[:5] == 'bytes': - return len(typ) > 5 + # if no length suffix, it has a dynamic size + return typ != 'bytes' + # containers are only constant-size if all of the fields are constant size. elif hasattr(typ, 'fields'): for subtype in typ.fields.values(): if not is_constant_sized(subtype): @@ -90,40 +114,98 @@ def coerce_to_bytes(x): raise Exception("Expecting bytes") +def encode_bytes(value): + serialized_bytes = coerce_to_bytes(value) + assert len(serialized_bytes) < 2 ** (8 * BYTES_PER_LENGTH_PREFIX) + serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') + return serialized_length + serialized_bytes + + +def encode_variable_size_container(values, types): + return encode_bytes(encode_fixed_size_container(values, types)) + + +def encode_fixed_size_container(values, types): + return b''.join([serialize_value(v, typ) for (v, typ) in zip(values, types)]) + + def serialize_value(value, typ=None): if typ is None: typ = infer_type(value) + # "uintN" if isinstance(typ, str) and typ[:4] == 'uint': length = int(typ[4:]) assert length in (8, 16, 32, 64, 128, 256) return value.to_bytes(length // 8, 'little') - elif typ == 'bool': + # "bool" + elif isinstance(typ, str) and typ == 'bool': assert value in (True, False) return b'\x01' if value is True else b'\x00' - elif (isinstance(typ, list) and len(typ) == 1) or typ == 'bytes': - serialized_bytes = coerce_to_bytes(value) if typ == 'bytes' else b''.join([serialize_value(element, typ[0]) for element in value]) - assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) - serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') - return serialized_length + serialized_bytes + # Vector elif isinstance(typ, list) and len(typ) == 2: + # (regardless of element type, sanity-check if the length reported in the vector type matches the value length) assert len(value) == typ[1] - return b''.join([serialize_value(element, typ[0]) for element in value]) + # If value is fixed-size (i.e. element type is fixed-size): + if is_constant_sized(typ): + return encode_fixed_size_container(value, [typ[0]] * len(value)) + # If value is variable-size (i.e. 
element type is variable-size) + else: + return encode_variable_size_container(value, [typ[0]] * len(value)) + # "bytes" (variable size) + elif isinstance(typ, str) and typ == 'bytes': + return encode_bytes(value) + # List + elif isinstance(typ, list) and len(typ) == 1: + return encode_variable_size_container(value, [typ[0]] * len(value)) + # "bytesN" (fixed size) elif isinstance(typ, str) and len(typ) > 5 and typ[:5] == 'bytes': assert len(value) == int(typ[5:]), (value, int(typ[5:])) return coerce_to_bytes(value) + # containers elif hasattr(typ, 'fields'): - serialized_bytes = b''.join([serialize_value(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) + values = [getattr(value, field) for field in typ.fields.keys()] + types = list(typ.fields.values()) if is_constant_sized(typ): - return serialized_bytes + return encode_fixed_size_container(values, types) else: - assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) - serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') - return serialized_length + serialized_bytes + return encode_variable_size_container(values, types) else: print(value, typ) raise Exception("Type not recognized") +def get_zero_value(typ: Any) -> Any: + if isinstance(typ, str): + # Bytes array + if typ == 'bytes': + return b'' + # bytesN + elif typ[:5] == 'bytes' and len(typ) > 5: + length = int(typ[5:]) + return b'\x00' * length + # Basic types + elif typ == 'bool': + return False + elif typ[:4] == 'uint': + return 0 + elif typ == 'byte': + return 0x00 + else: + raise ValueError("Type not recognized") + # Vector: + elif isinstance(typ, list) and len(typ) == 2: + return [get_zero_value(typ[0]) for _ in range(typ[1])] + # List: + elif isinstance(typ, list) and len(typ) == 1: + return [] + # Container: + elif hasattr(typ, 'fields'): + return typ(**{field: get_zero_value(subtype) for field, subtype in typ.fields.items()}) + else: + print(typ) + raise Exception("Type not recognized") + + def chunkify(bytez): bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK) return [bytez[i:i + 32] for i in range(0, len(bytez), 32)] @@ -152,12 +234,27 @@ def mix_in_length(root, length): def infer_type(value): + """ + Note: defaults to uint64 for integer type inference due to lack of information. + Other integer sizes are still supported, see spec. + :param value: The value to infer a SSZ type for. + :return: The SSZ type. + """ if hasattr(value.__class__, 'fields'): return value.__class__ elif isinstance(value, Vector): - return [infer_type(value[0]) if len(value) > 0 else 'uint64', len(value)] + if len(value) > 0: + return [infer_type(value[0]), len(value)] + else: + # Element type does not matter too much, + # assumed to be a basic type for size-encoding purposes, vector is empty. + return ['uint64'] elif isinstance(value, list): - return [infer_type(value[0])] if len(value) > 0 else ['uint64'] + if len(value) > 0: + return [infer_type(value[0])] + else: + # Element type does not matter, list-content size will be encoded regardless, list is empty. 
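+            # Illustrative aside (not load-bearing for the inference itself): an empty list
+            # serializes to nothing but its zero length prefix, whatever element type is assumed,
+            # e.g. serialize_value([], ['uint64']) == (0).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little').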
+ return ['uint64'] elif isinstance(value, (bytes, str)): return 'bytes' elif isinstance(value, int): @@ -169,24 +266,41 @@ def infer_type(value): def hash_tree_root(value, typ=None): if typ is None: typ = infer_type(value) + # ------------------------------------- + # merkleize(pack(value)) + # basic object: merkleize packed version (merkleization pads it to 32 bytes if it is not already) if is_basic(typ): return merkleize(pack([value], typ)) - elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]): - return mix_in_length(merkleize(pack(value, typ[0])), len(value)) - elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]): - return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value)) + # or a vector of basic objects elif isinstance(typ, list) and len(typ) == 2 and is_basic(typ[0]): assert len(value) == typ[1] return merkleize(pack(value, typ[0])) + # ------------------------------------- + # mix_in_length(merkleize(pack(value)), len(value)) + # if value is a list of basic objects + elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]): + return mix_in_length(merkleize(pack(value, typ[0])), len(value)) + # (needs some extra work for non-fixed-sized bytes array) elif typ == 'bytes': return mix_in_length(merkleize(chunkify(coerce_to_bytes(value))), len(value)) + # ------------------------------------- + # merkleize([hash_tree_root(element) for element in value]) + # if value is a vector of composite objects + elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]): + return merkleize([hash_tree_root(element, typ[0]) for element in value]) + # (needs some extra work for fixed-sized bytes array) elif isinstance(typ, str) and typ[:5] == 'bytes' and len(typ) > 5: assert len(value) == int(typ[5:]) return merkleize(chunkify(coerce_to_bytes(value))) - elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]): - return merkleize([hash_tree_root(element, typ[0]) for element in value]) + # or a container elif hasattr(typ, 'fields'): return merkleize([hash_tree_root(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) + # ------------------------------------- + # mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value)) + # if value is a list of composite objects + elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]): + return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value)) + # ------------------------------------- else: raise Exception("Type not recognized") From 741a74a02cbb4d7efce905ed8cfc4732d2fd7c00 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 18 Apr 2019 11:16:50 -0600 Subject: [PATCH 389/481] re-add crosslink tests and ensure pass --- specs/core/0_beacon-chain.md | 9 +- .../test_process_crosslinks.py | 136 ++++++++++++++++++ 2 files changed, 141 insertions(+), 4 deletions(-) create mode 100644 test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 1c00ac659..eaceec6ac 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1631,8 +1631,8 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationData) -> Crosslink: return Crosslink( epoch=min(slot_to_epoch(data.slot), state.current_crosslinks[data.shard].epoch + MAX_CROSSLINK_EPOCHS), - 
crosslink_data_root=data.crosslink_data_root, previous_crosslink_root=data.previous_crosslink_root, + crosslink_data_root=data.crosslink_data_root, ) ``` @@ -1641,8 +1641,9 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations shard_attestations = [a for a in pending_attestations if a.data.shard == shard] shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations] - candidate_crosslinks = [c for c in shard_crosslinks if - hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c)) + candidate_crosslinks = [ + c for c in shard_crosslinks + if hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c)) ] if len(candidate_crosslinks) == 0: return Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH), [] @@ -1718,7 +1719,7 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: - state.previous_crosslinks = state.current_crosslinks + state.previous_crosslinks = [c for c in state.current_crosslinks] previous_epoch = get_previous_epoch(state) next_epoch = get_current_epoch(state) + 1 for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): diff --git a/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py new file mode 100644 index 000000000..fe694724a --- /dev/null +++ b/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py @@ -0,0 +1,136 @@ +from copy import deepcopy +import pytest + +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.state_transition import ( + state_transition, +) +from eth2spec.phase0.spec import ( + cache_state, + get_crosslink_deltas, + process_crosslinks, +) +from tests.helpers import ( + add_attestation_to_state, + build_empty_block_for_next_slot, + fill_aggregate_attestation, + get_crosslink_committee_for_attestation, + get_valid_attestation, + next_epoch, + next_slot, + set_bitfield_bit, +) + + +# mark entire file as 'crosslinks' +pytestmark = pytest.mark.crosslinks + + +def run_process_crosslinks(state, valid=True): + # transition state to slot before state transition + slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 + block = build_empty_block_for_next_slot(state) + block.slot = slot + state_transition(state, block) + + # cache state before epoch transition + cache_state(state) + + post_state = deepcopy(state) + process_crosslinks(post_state) + + return state, post_state + + +def test_no_attestations(state): + pre_state, post_state = run_process_crosslinks(state) + + for shard in range(spec.SHARD_COUNT): + assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard] + + return pre_state, post_state + + +def test_single_crosslink_update_from_current_epoch(state): + next_epoch(state) + + attestation = get_valid_attestation(state) + + fill_aggregate_attestation(state, attestation) + add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) + + assert len(state.current_epoch_attestations) == 1 + + pre_state, post_state = run_process_crosslinks(state) + + shard = attestation.data.shard + assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard] + assert 
pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] + + return pre_state, post_state + + +def test_single_crosslink_update_from_previous_epoch(state): + next_epoch(state) + + attestation = get_valid_attestation(state) + + fill_aggregate_attestation(state, attestation) + add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH) + + assert len(state.previous_epoch_attestations) == 1 + + pre_state, post_state = run_process_crosslinks(state) + crosslink_deltas = get_crosslink_deltas(state) + + shard = attestation.data.shard + assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard] + assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] + # ensure rewarded + for index in get_crosslink_committee_for_attestation(state, attestation.data): + assert crosslink_deltas[0][index] > 0 + assert crosslink_deltas[1][index] == 0 + + return pre_state, post_state + + +def test_double_late_crosslink(state): + next_epoch(state) + state.slot += 4 + + attestation_1 = get_valid_attestation(state) + fill_aggregate_attestation(state, attestation_1) + + # add attestation_1 in the next epoch + next_epoch(state) + add_attestation_to_state(state, attestation_1, state.slot + 1) + + for slot in range(spec.SLOTS_PER_EPOCH): + attestation_2 = get_valid_attestation(state) + if attestation_2.data.shard == attestation_1.data.shard: + break + next_slot(state) + fill_aggregate_attestation(state, attestation_2) + + # add attestation_2 in the next epoch after attestation_1 has + # already updated the relevant crosslink + next_epoch(state) + add_attestation_to_state(state, attestation_2, state.slot + 1) + + assert len(state.previous_epoch_attestations) == 1 + assert len(state.current_epoch_attestations) == 0 + + pre_state, post_state = run_process_crosslinks(state) + crosslink_deltas = get_crosslink_deltas(state) + + shard = attestation_2.data.shard + + # ensure that the current crosslinks were not updated by the second attestation + assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard] + # ensure no reward, only penalties for the failed crosslink + for index in get_crosslink_committee_for_attestation(state, attestation_2.data): + assert crosslink_deltas[0][index] == 0 + assert crosslink_deltas[1][index] > 0 + + return pre_state, post_state From 9f4e59b0bc5c9552e79313ab3dfdeee870b6b4a0 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Thu, 18 Apr 2019 18:33:06 -0600 Subject: [PATCH 390/481] enhance finality testing -- case 1, 2, 4 --- specs/core/0_beacon-chain.md | 4 + test_libs/pyspec/tests/helpers.py | 1 - test_libs/pyspec/tests/test_finality.py | 156 ++++++++++++++++++++++++ test_libs/pyspec/tests/test_sanity.py | 27 ---- 4 files changed, 160 insertions(+), 28 deletions(-) create mode 100644 test_libs/pyspec/tests/test_finality.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index eaceec6ac..4cdfa9c4f 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1697,18 +1697,22 @@ def process_justification_and_finalization(state: BeaconState) -> None: current_epoch = get_current_epoch(state) # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3: + print("rule 1") state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 2nd/3rd most recent epochs 
are justified, the 2nd using the 3rd as source if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2: + print("rule 2") state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2: + print("rule 3") state.finalized_epoch = old_current_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch == current_epoch - 1: + print("rule 4") state.finalized_epoch = old_current_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) ``` diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 44d2dcb4d..387c434a0 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -155,7 +155,6 @@ def build_attestation_data(state, slot, shard): current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) if slot < current_epoch_start_slot: - print(slot) epoch_boundary_root = get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) elif slot == current_epoch_start_slot: epoch_boundary_root = block_root diff --git a/test_libs/pyspec/tests/test_finality.py b/test_libs/pyspec/tests/test_finality.py new file mode 100644 index 000000000..8a429cb6e --- /dev/null +++ b/test_libs/pyspec/tests/test_finality.py @@ -0,0 +1,156 @@ +from copy import deepcopy + +import pytest + +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.state_transition import ( + state_transition, +) +from .helpers import ( + build_empty_block_for_next_slot, + fill_aggregate_attestation, + get_current_epoch, + get_epoch_start_slot, + get_valid_attestation, + next_epoch, +) + +# mark entire file as 'state' +pytestmark = pytest.mark.state + + +def check_finality(state, + prev_state, + current_justified_changed, + previous_justified_changed, + finalized_changed): + if current_justified_changed: + assert state.current_justified_epoch > prev_state.current_justified_epoch + assert state.current_justified_root != prev_state.current_justified_root + else: + assert state.current_justified_epoch == prev_state.current_justified_epoch + assert state.current_justified_root == prev_state.current_justified_root + + if previous_justified_changed: + assert state.previous_justified_epoch > prev_state.previous_justified_epoch + assert state.previous_justified_root != prev_state.previous_justified_root + else: + assert state.previous_justified_epoch == prev_state.previous_justified_epoch + assert state.previous_justified_root == prev_state.previous_justified_root + + if finalized_changed: + assert state.finalized_epoch > prev_state.finalized_epoch + assert state.finalized_root != prev_state.finalized_root + else: + assert state.finalized_epoch == prev_state.finalized_epoch + assert state.finalized_root == prev_state.finalized_root + + +def test_finality_from_genesis_rule_4(state): + test_state = deepcopy(state) + + blocks = [] + for epoch in range(6): + old_current_justified_epoch = test_state.current_justified_epoch + old_current_justified_root = test_state.current_justified_root + for slot in range(spec.SLOTS_PER_EPOCH): + attestation = None + 
slot_to_attest = test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
+            if slot_to_attest >= spec.GENESIS_SLOT:
+                attestation = get_valid_attestation(test_state, slot_to_attest)
+                fill_aggregate_attestation(test_state, attestation)
+            block = build_empty_block_for_next_slot(test_state)
+            if attestation:
+                block.body.attestations.append(attestation)
+            state_transition(test_state, block)
+            blocks.append(block)
+
+        if epoch == 0:
+            check_finality(test_state, state, False, False, False)
+        elif epoch == 1:
+            check_finality(test_state, state, False, False, False)
+        elif epoch == 2:
+            check_finality(test_state, state, True, False, False)
+        elif epoch >= 3:
+            # rule 4 of finality
+            check_finality(test_state, state, True, True, True)
+            assert test_state.finalized_epoch == old_current_justified_epoch
+            assert test_state.finalized_root == old_current_justified_root
+
+    return state, blocks, test_state
+
+
+def test_finality_rule_1(state):
+    # get past first two epochs that finality does not run on
+    next_epoch(state)
+    next_epoch(state)
+
+    test_state = deepcopy(state)
+
+    blocks = []
+    for epoch in range(3):
+        old_previous_justified_epoch = test_state.previous_justified_epoch
+        old_previous_justified_root = test_state.previous_justified_root
+        for slot in range(spec.SLOTS_PER_EPOCH):
+            slot_to_attest = test_state.slot - spec.SLOTS_PER_EPOCH + 1
+            attestation = get_valid_attestation(test_state, slot_to_attest)
+            fill_aggregate_attestation(test_state, attestation)
+            block = build_empty_block_for_next_slot(test_state)
+            block.body.attestations.append(attestation)
+            state_transition(test_state, block)
+
+            assert len(test_state.previous_epoch_attestations) >= 0
+            assert len(test_state.current_epoch_attestations) == 0
+
+            blocks.append(block)
+
+        if epoch == 0:
+            check_finality(test_state, state, True, False, False)
+        elif epoch == 1:
+            check_finality(test_state, state, True, True, False)
+        elif epoch == 2:
+            # finalized by rule 1
+            check_finality(test_state, state, True, True, True)
+            assert test_state.finalized_epoch == old_previous_justified_epoch
+            assert 
test_state.finalized_root == old_previous_justified_root diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index 29333a7ad..e48a6b774 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -145,33 +145,6 @@ def test_empty_epoch_transition_not_finalizing(state): return state, [block], test_state -def test_full_attestations_finalizing(state): - test_state = deepcopy(state) - - for slot in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): - next_slot(test_state) - - for epoch in range(5): - for slot in range(spec.SLOTS_PER_EPOCH): - print(test_state.slot) - attestation = get_valid_attestation(test_state, test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY) - fill_aggregate_attestation(test_state, attestation) - block = build_empty_block_for_next_slot(test_state) - block.body.attestations.append(attestation) - state_transition(test_state, block) - - if epoch == 0: - check_finality(test_state, state, False, False, False) - elif epoch == 1: - check_finality(test_state, state, False, False, False) - elif epoch == 2: - check_finality(test_state, state, True, False, False) - elif epoch == 3: - check_finality(test_state, state, True, True, False) - elif epoch == 4: - check_finality(test_state, state, True, True, True) - - def test_proposer_slashing(state): test_state = deepcopy(state) proposer_slashing = get_valid_proposer_slashing(state) From 972168d69521e21b1ab25b2f603cab2cb45058f3 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Apr 2019 18:49:38 +1000 Subject: [PATCH 391/481] Make type-inference stable on empty lists/vectors --- test_libs/pyspec/eth2spec/utils/minimal_ssz.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py index 8f42f1f65..ff7ab6027 100644 --- a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py +++ b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py @@ -262,7 +262,6 @@ def infer_type(value): else: raise Exception("Failed to infer type") - def hash_tree_root(value, typ=None): if typ is None: typ = infer_type(value) From 23d6b468e39fee37b4567f430185e6283869e756 Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 16 Apr 2019 20:50:13 +1000 Subject: [PATCH 392/481] Work towards testing all edge-cases of SSZ, for known (static) object types --- scripts/phase0/function_puller.py | 5 + .../{ssz => ssz_generic}/__init__.py | 0 test_generators/{ssz => ssz_generic}/main.py | 2 +- .../{ssz => ssz_generic}/renderers.py | 0 .../{ssz => ssz_generic}/requirements.txt | 0 .../{ssz => ssz_generic}/uint_test_cases.py | 0 test_generators/ssz_static/README.md | 4 + test_generators/ssz_static/__init__.py | 0 test_generators/ssz_static/main.py | 55 ++++++++ test_generators/ssz_static/requirements.txt | 4 + .../pyspec/eth2spec/debug/random_value.py | 126 ++++++++++++++++++ 11 files changed, 195 insertions(+), 1 deletion(-) rename test_generators/{ssz => ssz_generic}/__init__.py (100%) rename test_generators/{ssz => ssz_generic}/main.py (93%) rename test_generators/{ssz => ssz_generic}/renderers.py (100%) rename test_generators/{ssz => ssz_generic}/requirements.txt (100%) rename test_generators/{ssz => ssz_generic}/uint_test_cases.py (100%) create mode 100644 test_generators/ssz_static/README.md create mode 100644 test_generators/ssz_static/__init__.py create mode 100644 test_generators/ssz_static/main.py create mode 100644 test_generators/ssz_static/requirements.txt create mode 100644 
test_libs/pyspec/eth2spec/debug/random_value.py diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py index 59e5b5e24..635797d39 100644 --- a/scripts/phase0/function_puller.py +++ b/scripts/phase0/function_puller.py @@ -62,4 +62,9 @@ def get_spec(file_name: str) -> List[str]: code_lines.append('') for type_line in ssz_type: code_lines.append(' ' + type_line) + code_lines.append('') + code_lines.append('ssz_types = [' + ', '.join([f'\'{ssz_type_name}\'' for (ssz_type_name, _) in type_defs]) + ']') + code_lines.append('') + code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType: return globals()[name]') + code_lines.append('') return code_lines diff --git a/test_generators/ssz/__init__.py b/test_generators/ssz_generic/__init__.py similarity index 100% rename from test_generators/ssz/__init__.py rename to test_generators/ssz_generic/__init__.py diff --git a/test_generators/ssz/main.py b/test_generators/ssz_generic/main.py similarity index 93% rename from test_generators/ssz/main.py rename to test_generators/ssz_generic/main.py index 1c09d51e7..fe01a68d7 100644 --- a/test_generators/ssz/main.py +++ b/test_generators/ssz_generic/main.py @@ -44,4 +44,4 @@ def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: if __name__ == "__main__": - gen_runner.run_generator("ssz", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite]) + gen_runner.run_generator("ssz_generic", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite]) diff --git a/test_generators/ssz/renderers.py b/test_generators/ssz_generic/renderers.py similarity index 100% rename from test_generators/ssz/renderers.py rename to test_generators/ssz_generic/renderers.py diff --git a/test_generators/ssz/requirements.txt b/test_generators/ssz_generic/requirements.txt similarity index 100% rename from test_generators/ssz/requirements.txt rename to test_generators/ssz_generic/requirements.txt diff --git a/test_generators/ssz/uint_test_cases.py b/test_generators/ssz_generic/uint_test_cases.py similarity index 100% rename from test_generators/ssz/uint_test_cases.py rename to test_generators/ssz_generic/uint_test_cases.py diff --git a/test_generators/ssz_static/README.md b/test_generators/ssz_static/README.md new file mode 100644 index 000000000..014c71517 --- /dev/null +++ b/test_generators/ssz_static/README.md @@ -0,0 +1,4 @@ +# SSZ-static + +The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: + the serialization and hashing of ETH 2.0 data types diff --git a/test_generators/ssz_static/__init__.py b/test_generators/ssz_static/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py new file mode 100644 index 000000000..19942c0e8 --- /dev/null +++ b/test_generators/ssz_static/main.py @@ -0,0 +1,55 @@ +from eth_utils import ( + to_tuple, to_dict +) +from preset_loader import loader +from eth2spec.phase0 import spec +from eth2spec.utils.minimal_ssz import hash_tree_root, serialize +from eth2spec.debug import random_value, encode + +from gen_base import gen_runner, gen_suite, gen_typing +from random import Random + + +@to_dict +def render_test_case(rng: Random, name): + typ = spec.get_ssz_type_by_name(name) + # TODO: vary randomization args + value = random_value.get_random_ssz_object(rng, typ, 100, 10, random_value.RandomizationMode.mode_random, False) + yield "type_name", name + yield "value", encode.encode(value, typ) + 
yield "serialized", serialize(value) + yield "root", '0x' + hash_tree_root(value).hex() + + +@to_tuple +def ssz_static_cases(rng: Random): + for type_name in spec.ssz_types: + # TODO more types + for i in range(10): + render_test_case(rng, type_name) + + +def min_ssz_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + presets = loader.load_presets(configs_path, 'minimal') + spec.apply_constants_preset(presets) + rng = Random(123) + + return ("ssz_min_values_minimal", "core", gen_suite.render_suite( + title="ssz testing, with minimal config", + summary="Test suite for ssz serialization and hash-tree-root", + forks_timeline="testing", + forks=["phase0"], + config="minimal", + runner="ssz", + handler="static", + test_cases=ssz_static_cases(rng))) + +# TODO more suites + +# Variation in: randomization-mode, chaos mode, configuration + + +if __name__ == "__main__": + gen_runner.run_generator("ssz_static", [ + min_ssz_suite + ]) diff --git a/test_generators/ssz_static/requirements.txt b/test_generators/ssz_static/requirements.txt new file mode 100644 index 000000000..8f9bede8f --- /dev/null +++ b/test_generators/ssz_static/requirements.txt @@ -0,0 +1,4 @@ +eth-utils==1.4.1 +../../test_libs/gen_helpers +../../test_libs/config_helpers +../../test_libs/pyspec \ No newline at end of file diff --git a/test_libs/pyspec/eth2spec/debug/random_value.py b/test_libs/pyspec/eth2spec/debug/random_value.py new file mode 100644 index 000000000..431a4986a --- /dev/null +++ b/test_libs/pyspec/eth2spec/debug/random_value.py @@ -0,0 +1,126 @@ +from random import Random +from typing import Any +from enum import Enum + +UINT_SIZES = [8, 16, 32, 64, 128, 256] + +basic_types = ["uint%d" % v for v in UINT_SIZES] + ['bool', 'byte'] + +class RandomizationMode(Enum): + # random content / length + mode_random = 0 + # Zero-value + mode_zero = 2 + # Maximum value, limited to count 1 however + mode_max = 3 + # Return 0 values, i.e. empty + mode_nil_count = 4 + # Return 1 value, random content + mode_one_count = 5 + # Return max amount of values, random content + mode_max_count = 6 + +def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list_length: int, mode: RandomizationMode, chaos: bool) -> Any: + """ + Create an object for a given type, filled with random data. + :param rng: The random number generator to use. + :param typ: The type to instantiate + :param max_bytes_length: the max. length for a random bytes array + :param max_list_length: the max. length for a random list + :param mode: how to randomize + :param chaos: if true, the randomization-mode will be randomly changed + :return: the random object instance, of the given type. + """ + if chaos: + mode = rng.choice(list(RandomizationMode)) + if isinstance(typ, str): + # Bytes array + if typ == 'bytes': + if mode == RandomizationMode.mode_nil_count: + return b'' + if mode == RandomizationMode.mode_max_count: + return get_random_bytes_list(rng, max_bytes_length) + if mode == RandomizationMode.mode_one_count: + return get_random_bytes_list(rng, 1) + if mode == RandomizationMode.mode_zero: + return b'\x00' + if mode == RandomizationMode.mode_max: + return b'\xff' + return get_random_bytes_list(rng, rng.randint(0, max_bytes_length)) + elif typ[:5] == 'bytes' and len(typ) > 5: + length = int(typ[5:]) + # Sanity, don't generate absurdly big random values + # If a client is aiming to performance-test, they should create a benchmark suite. 
+ assert length <= max_bytes_length + if mode == RandomizationMode.mode_zero: + return b'\x00' * length + if mode == RandomizationMode.mode_max: + return b'\xff' * length + return get_random_bytes_list(rng, length) + # Basic types + else: + if mode == RandomizationMode.mode_zero: + return get_min_basic_value(typ) + if mode == RandomizationMode.mode_max: + return get_max_basic_value(typ) + return get_random_basic_value(rng, typ) + # Vector: + elif isinstance(typ, list) and len(typ) == 2: + return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode) for _ in range(typ[1])] + # List: + elif isinstance(typ, list) and len(typ) == 1: + length = rng.randint(0, max_list_length) + if mode == RandomizationMode.mode_one_count: + length = 1 + if mode == RandomizationMode.mode_max_count: + length = max_list_length + return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode) for _ in range(length)] + # Container: + elif hasattr(typ, 'fields'): + return typ({field: get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode) for field, subtype in typ.fields.items()}) + else: + print(typ) + raise Exception("Type not recognized") + + +def get_random_bytes_list(rng: Random, length: int) -> bytes: + return bytes(rng.getrandbits(8) for _ in range(length)) + + +def get_random_basic_value(rng: Random, typ: str) -> Any: + if typ == 'bool': + return rng.choice((True, False)) + if typ[:4] == 'uint': + size = int(typ[4:]) + assert size in (8, 16, 32, 64, 128, 256) + return rng.randint(0, 2**size - 1) + if typ == 'byte': + return rng.randint(0, 8) + else: + raise ValueError("Not a basic type") + + +def get_min_basic_value(typ: str) -> Any: + if typ == 'bool': + return False + if typ[:4] == 'uint': + size = int(typ[4:]) + assert size in (8, 16, 32, 64, 128, 256) + return 0 + if typ == 'byte': + return 0x00 + else: + raise ValueError("Not a basic type") + + +def get_max_basic_value(typ: str) -> Any: + if typ == 'bool': + return True + if typ[:4] == 'uint': + size = int(typ[4:]) + assert size in (8, 16, 32, 64, 128, 256) + return 2**size - 1 + if typ == 'byte': + return 0xff + else: + raise ValueError("Not a basic type") From ad30722420ffd09ff3ea79f9dec01857ae0dc5de Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Apr 2019 13:49:29 +1000 Subject: [PATCH 393/481] ssz-static suite --- test_generators/ssz_static/main.py | 84 ++++++++++++------- .../pyspec/eth2spec/debug/random_value.py | 27 ++++-- 2 files changed, 73 insertions(+), 38 deletions(-) diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py index 19942c0e8..2445e4ab7 100644 --- a/test_generators/ssz_static/main.py +++ b/test_generators/ssz_static/main.py @@ -1,55 +1,79 @@ +from random import Random + +from eth2spec.debug import random_value, encode +from eth2spec.phase0 import spec +from eth2spec.utils.minimal_ssz import hash_tree_root, serialize from eth_utils import ( to_tuple, to_dict ) -from preset_loader import loader -from eth2spec.phase0 import spec -from eth2spec.utils.minimal_ssz import hash_tree_root, serialize -from eth2spec.debug import random_value, encode - from gen_base import gen_runner, gen_suite, gen_typing -from random import Random +from preset_loader import loader + +MAX_BYTES_LENGTH = 100 +MAX_LIST_LENGTH = 10 @to_dict -def render_test_case(rng: Random, name): +def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMode, chaos: bool): typ = spec.get_ssz_type_by_name(name) - # TODO: vary randomization args - value = 
random_value.get_random_ssz_object(rng, typ, 100, 10, random_value.RandomizationMode.mode_random, False) + value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos) yield "type_name", name yield "value", encode.encode(value, typ) - yield "serialized", serialize(value) + yield "serialized", serialize(value).hex() yield "root", '0x' + hash_tree_root(value).hex() @to_tuple -def ssz_static_cases(rng: Random): +def ssz_static_cases(rng: Random, mode: random_value.RandomizationMode, chaos: bool, count: int): for type_name in spec.ssz_types: - # TODO more types - for i in range(10): - render_test_case(rng, type_name) + for i in range(count): + yield create_test_case(rng, type_name, mode, chaos) -def min_ssz_suite(configs_path: str) -> gen_typing.TestSuiteOutput: - presets = loader.load_presets(configs_path, 'minimal') - spec.apply_constants_preset(presets) - rng = Random(123) +def get_ssz_suite(seed: int, config_name: str, mode: random_value.RandomizationMode, chaos: bool, cases_if_random: int): + def ssz_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + # Apply changes to presets, this affects some of the vector types. + presets = loader.load_presets(configs_path, config_name) + spec.apply_constants_preset(presets) - return ("ssz_min_values_minimal", "core", gen_suite.render_suite( - title="ssz testing, with minimal config", - summary="Test suite for ssz serialization and hash-tree-root", - forks_timeline="testing", - forks=["phase0"], - config="minimal", - runner="ssz", - handler="static", - test_cases=ssz_static_cases(rng))) + # Reproducible RNG + rng = Random(seed) -# TODO more suites + random_mode_name = mode.to_name() -# Variation in: randomization-mode, chaos mode, configuration + suite_name = f"ssz_{config_name}_{random_mode_name}{'_chaos' if chaos else ''}" + + count = cases_if_random if chaos or mode.is_changing() else 1 + print(f"generating SSZ-static suite ({count} cases per ssz type): {suite_name}") + + return (suite_name, "core", gen_suite.render_suite( + title=f"ssz testing, with {config_name} config, randomized with mode {random_mode_name}{' and with chaos applied' if chaos else ''}", + summary="Test suite for ssz serialization and hash-tree-root", + forks_timeline="testing", + forks=["phase0"], + config="minimal", + runner="ssz", + handler="static", + test_cases=ssz_static_cases(rng, mode, chaos, count))) + + return ssz_suite if __name__ == "__main__": + # [(seed, config name, randomization mode, chaos on/off, cases_if_random)] + settings = [] + seed = 1 + for mode in random_value.RandomizationMode: + settings.append((seed, "minimal", mode, False, 30)) + seed += 1 + settings.append((seed, "minimal", random_value.RandomizationMode.mode_random, True, 30)) + seed += 1 + settings.append((seed, "mainnet", random_value.RandomizationMode.mode_random, False, 5)) + seed += 1 + + print("Settings: %d, SSZ-types: %d" % (len(settings), len(spec.ssz_types))) + gen_runner.run_generator("ssz_static", [ - min_ssz_suite + get_ssz_suite(seed, config_name, mode, chaos, cases_if_random) + for (seed, config_name, mode, chaos, cases_if_random) in settings ]) diff --git a/test_libs/pyspec/eth2spec/debug/random_value.py b/test_libs/pyspec/eth2spec/debug/random_value.py index 431a4986a..59ed5b54b 100644 --- a/test_libs/pyspec/eth2spec/debug/random_value.py +++ b/test_libs/pyspec/eth2spec/debug/random_value.py @@ -2,23 +2,34 @@ from random import Random from typing import Any from enum import Enum + UINT_SIZES = [8, 16, 32, 64, 128, 256] basic_types = 
["uint%d" % v for v in UINT_SIZES] + ['bool', 'byte'] +random_mode_names = ["random", "zero", "max", "nil", "one", "lengthy"] + + class RandomizationMode(Enum): # random content / length mode_random = 0 # Zero-value - mode_zero = 2 + mode_zero = 1 # Maximum value, limited to count 1 however - mode_max = 3 + mode_max = 2 # Return 0 values, i.e. empty - mode_nil_count = 4 + mode_nil_count = 3 # Return 1 value, random content - mode_one_count = 5 + mode_one_count = 4 # Return max amount of values, random content - mode_max_count = 6 + mode_max_count = 5 + + def to_name(self): + return random_mode_names[self.value] + + def is_changing(self): + return self.value in [0, 4, 5] + def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list_length: int, mode: RandomizationMode, chaos: bool) -> Any: """ @@ -66,7 +77,7 @@ def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list return get_random_basic_value(rng, typ) # Vector: elif isinstance(typ, list) and len(typ) == 2: - return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode) for _ in range(typ[1])] + return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(typ[1])] # List: elif isinstance(typ, list) and len(typ) == 1: length = rng.randint(0, max_list_length) @@ -74,10 +85,10 @@ def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list length = 1 if mode == RandomizationMode.mode_max_count: length = max_list_length - return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode) for _ in range(length)] + return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(length)] # Container: elif hasattr(typ, 'fields'): - return typ({field: get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode) for field, subtype in typ.fields.items()}) + return typ(**{field: get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos) for field, subtype in typ.fields.items()}) else: print(typ) raise Exception("Type not recognized") From 40cc0420abeab2154dac32b94ccf478543cab792 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Apr 2019 19:31:06 +1000 Subject: [PATCH 394/481] fix test output format --- test_generators/ssz_static/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py index 2445e4ab7..149b83626 100644 --- a/test_generators/ssz_static/main.py +++ b/test_generators/ssz_static/main.py @@ -19,7 +19,7 @@ def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMod value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos) yield "type_name", name yield "value", encode.encode(value, typ) - yield "serialized", serialize(value).hex() + yield "serialized", '0x' + serialize(value).hex() yield "root", '0x' + hash_tree_root(value).hex() From 627c1a347af1ca3d7f4494b198928b58e2dc2823 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Apr 2019 11:37:02 +1000 Subject: [PATCH 395/481] make encoder output large uints as string, and fix ssz suite config setting --- test_generators/ssz_static/main.py | 2 +- test_libs/pyspec/eth2spec/debug/encode.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py index 149b83626..010ca2735 100644 --- a/test_generators/ssz_static/main.py +++ 
b/test_generators/ssz_static/main.py @@ -51,7 +51,7 @@ def get_ssz_suite(seed: int, config_name: str, mode: random_value.RandomizationM summary="Test suite for ssz serialization and hash-tree-root", forks_timeline="testing", forks=["phase0"], - config="minimal", + config=config_name, runner="ssz", handler="static", test_cases=ssz_static_cases(rng, mode, chaos, count))) diff --git a/test_libs/pyspec/eth2spec/debug/encode.py b/test_libs/pyspec/eth2spec/debug/encode.py index f50bc9d5e..d3513e638 100644 --- a/test_libs/pyspec/eth2spec/debug/encode.py +++ b/test_libs/pyspec/eth2spec/debug/encode.py @@ -3,6 +3,8 @@ from eth2spec.utils.minimal_ssz import hash_tree_root def encode(value, typ, include_hash_tree_roots=False): if isinstance(typ, str) and typ[:4] == 'uint': + if typ[4:] == '128' or typ[4:] == '256': + return str(value) return value elif typ == 'bool': assert value in (True, False) From bc685133ece8b4deb94602567a6cef2812b40bec Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Apr 2019 18:40:11 +1000 Subject: [PATCH 396/481] Document SSZ testing, and test-suite running --- specs/test_formats/README.md | 21 +++++++++++++++++ specs/test_formats/ssz/README.md | 15 ------------ specs/test_formats/ssz_generic/README.md | 20 ++++++++++++++++ .../test_formats/{ssz => ssz_generic}/uint.md | 0 specs/test_formats/ssz_static/README.md | 8 +++++++ specs/test_formats/ssz_static/core.md | 23 +++++++++++++++++++ 6 files changed, 72 insertions(+), 15 deletions(-) delete mode 100644 specs/test_formats/ssz/README.md create mode 100644 specs/test_formats/ssz_generic/README.md rename specs/test_formats/{ssz => ssz_generic}/uint.md (100%) create mode 100644 specs/test_formats/ssz_static/README.md create mode 100644 specs/test_formats/ssz_static/core.md diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index 6b9533056..da2e38c01 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -175,3 +175,24 @@ To prevent parsing of hundreds of different YAML files to test a specific test t │   ... <--- more handlers ... <--- more test types ``` + + +## Note for implementers + +The basic pattern for test-suite loading and running is: + +Iterate suites for given test-type, or sub-type (e.g. `operations > deposits`): +1. Filter test-suite, options: + - Config: Load first few lines, load into YAML, and check `config`, either: + - Pass the suite to the correct compiled target + - Ignore the suite if running tests as part of a compiled target with different configuration + - Load the correct configuration for the suite dynamically before running the suite + - Select by file name + - Filter for specific suites (e.g. for a specific fork) +2. Load the YAML + - Optionally translate the data into applicable naming, e.g. `snake_case` to `PascalCase` +3. Iterate through the `test_cases` +4. Ask test-runner to allocate a new test-case (i.e. objectify the test-case, generalize it with a `TestCase` interface) + Optionally pass raw test-case data to enable dynamic test-case allocation. + 1. Load test-case data into it. + 2. Make the test-case run. diff --git a/specs/test_formats/ssz/README.md b/specs/test_formats/ssz/README.md deleted file mode 100644 index 72ba7dac1..000000000 --- a/specs/test_formats/ssz/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# SSZ tests - -SSZ has changed throughout the development of ETH 2.0. - -## Contents - -A minimal but useful series of tests covering `uint` encoding and decoding is provided. 
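The loading pattern in the `## Note for implementers` section added above can be condensed into a small driver loop. A minimal sketch, assuming PyYAML, one YAML file per suite, and a client-supplied `run_case` callable for the handler in question; none of these helper names come from the spec repository itself:

```python
# Illustrative sketch of the suite-loading pattern described in the README above.
# Assumptions: PyYAML is available, suites are stored as one YAML file per suite,
# and `run_case` is a client-supplied callable for the relevant test handler.
from pathlib import Path
from typing import Callable

import yaml


def run_suites(suite_dir: str, expected_config: str, run_case: Callable[[dict], None]) -> None:
    for suite_path in sorted(Path(suite_dir).glob("**/*.yaml")):
        with open(suite_path) as f:
            suite = yaml.safe_load(f)
        # Step 1: filter by configuration, skipping suites built for another preset
        if suite.get("config") != expected_config:
            continue
        # Steps 2-4: the YAML is loaded; iterate the test cases and hand each to the runner
        for case in suite["test_cases"]:
            run_case(case)
```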
-This is a direct port of the older SSZ `uint` tests (minus outdated test cases). - -[uint test format](./uint.md). - -Note: the current phase-0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc. -The exact uint lengths to support may be redefined in the future. - -Extension of the SSZ tests collection is planned, see CI/testing issues for progress tracking. diff --git a/specs/test_formats/ssz_generic/README.md b/specs/test_formats/ssz_generic/README.md new file mode 100644 index 000000000..9fda0c368 --- /dev/null +++ b/specs/test_formats/ssz_generic/README.md @@ -0,0 +1,20 @@ +# SSZ, generic tests + +This set of test-suites provides general testing for SSZ: + to instantiate any container/list/vector/other type from binary data. + +Since SSZ is in a development-phase, not the full suite of features is covered yet. +Note that these tests are based on the older SSZ package. +The tests are still relevant, but limited in scope: + more complex object encodings have changed since the original SSZ testing. + +A minimal but useful series of tests covering `uint` encoding and decoding is provided. +This is a direct port of the older SSZ `uint` tests (minus outdated test cases). + +[uint test format](./uint.md). + +Note: the current phase-0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc. +The exact uint lengths to support may be redefined in the future. + +Extension of the SSZ tests collection is planned, with an update to the new spec-maintained `minimal_ssz.py`, + see CI/testing issues for progress tracking. diff --git a/specs/test_formats/ssz/uint.md b/specs/test_formats/ssz_generic/uint.md similarity index 100% rename from specs/test_formats/ssz/uint.md rename to specs/test_formats/ssz_generic/uint.md diff --git a/specs/test_formats/ssz_static/README.md b/specs/test_formats/ssz_static/README.md new file mode 100644 index 000000000..413b00c75 --- /dev/null +++ b/specs/test_formats/ssz_static/README.md @@ -0,0 +1,8 @@ +# SSZ, static tests + +This set of test-suites provides static testing for SSZ: + to instantiate just the known ETH-2.0 SSZ types from binary data. + +This series of tests is based on the spec-maintained `minimal_ssz.py`, i.e. fully consistent with the SSZ spec. + +Test format documentation can be found here: [core test format](./core.md). diff --git a/specs/test_formats/ssz_static/core.md b/specs/test_formats/ssz_static/core.md new file mode 100644 index 000000000..8a5067f03 --- /dev/null +++ b/specs/test_formats/ssz_static/core.md @@ -0,0 +1,23 @@ +# Test format: SSZ static types + +The goal of this type is to provide clients with a solid reference how the known SSZ objects should be encoded. +Each object described in the Phase-0 spec is covered. +This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes +do not support (or have alternatives for) generic SSZ encoding/decoding. +This test-format ensures these direct serializations are covered. + +## Test case format + +```yaml +type_name: string -- string, object name, formatted as in spec. E.g. "BeaconBlock" +value: dynamic -- the YAML-encoded value, of the type specified by type_name. 
+serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x +root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x +``` + +## Condition + +A test-runner can implement the following assertions: +- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized` +- Hash-tree-root: After parsing the `value`, Hash-tree-root it: the output should match `root` +- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value` From 3a5243cc898c42c777060aeab724f1bc09bec230 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 19 Apr 2019 12:09:30 +1000 Subject: [PATCH 397/481] apply PR suggestions from djrtwo --- test_libs/pyspec/eth2spec/debug/random_value.py | 6 +++--- test_libs/pyspec/eth2spec/utils/minimal_ssz.py | 10 ++++------ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/test_libs/pyspec/eth2spec/debug/random_value.py b/test_libs/pyspec/eth2spec/debug/random_value.py index 59ed5b54b..a853d2328 100644 --- a/test_libs/pyspec/eth2spec/debug/random_value.py +++ b/test_libs/pyspec/eth2spec/debug/random_value.py @@ -103,7 +103,7 @@ def get_random_basic_value(rng: Random, typ: str) -> Any: return rng.choice((True, False)) if typ[:4] == 'uint': size = int(typ[4:]) - assert size in (8, 16, 32, 64, 128, 256) + assert size in UINT_SIZES return rng.randint(0, 2**size - 1) if typ == 'byte': return rng.randint(0, 8) @@ -116,7 +116,7 @@ def get_min_basic_value(typ: str) -> Any: return False if typ[:4] == 'uint': size = int(typ[4:]) - assert size in (8, 16, 32, 64, 128, 256) + assert size in UINT_SIZES return 0 if typ == 'byte': return 0x00 @@ -129,7 +129,7 @@ def get_max_basic_value(typ: str) -> Any: return True if typ[:4] == 'uint': size = int(typ[4:]) - assert size in (8, 16, 32, 64, 128, 256) + assert size in UINT_SIZES return 2**size - 1 if typ == 'byte': return 0xff diff --git a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py index ff7ab6027..dbe9d1359 100644 --- a/test_libs/pyspec/eth2spec/utils/minimal_ssz.py +++ b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py @@ -1,7 +1,7 @@ -from .hash_function import hash - from typing import Any +from .hash_function import hash + BYTES_PER_CHUNK = 32 BYTES_PER_LENGTH_PREFIX = 4 ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK @@ -17,10 +17,7 @@ def SSZType(fields): setattr(self, f, kwargs[f]) def __eq__(self, other): - return ( - self.fields == other.fields and - self.serialize() == other.serialize() - ) + return self.fields == other.fields and self.serialize() == other.serialize() def __hash__(self): return int.from_bytes(self.hash_tree_root(), byteorder="little") @@ -262,6 +259,7 @@ def infer_type(value): else: raise Exception("Failed to infer type") + def hash_tree_root(value, typ=None): if typ is None: typ = infer_type(value) From fad9b4672aa1340ead82c227c7c4d72b46dbc399 Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 19 Apr 2019 18:09:29 +1000 Subject: [PATCH 398/481] Disallow transfers As discussed in yesterday's call, temporarily disable transfers until the network is deemed stable enough. We can consider doing a "test-run hard fork" changing this constant prior to the phase 1 hard fork. 
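For the mechanism behind this change: phase 0 block processing bounds each operation list by its `MAX_*` constant, so a zero `MAX_TRANSFERS` rejects any block carrying a transfer. A minimal sketch of that effect, with the length check simplified and the names purely illustrative:

```python
# Simplified stand-in for the spec's per-block operation limit (illustrative only).
# With MAX_TRANSFERS = 0, any non-empty transfer list fails validation.
MAX_TRANSFERS = 0


def check_transfer_count(transfers: list) -> None:
    assert len(transfers) <= MAX_TRANSFERS


check_transfer_count([])        # an empty transfer list is still a valid block body
# check_transfer_count(["t"])   # would raise AssertionError: transfers are disabled
```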
--- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f04a04877..a133fcf42 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -257,7 +257,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MAX_ATTESTATIONS` | `2**7` (= 128) | | `MAX_DEPOSITS` | `2**4` (= 16) | | `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) | -| `MAX_TRANSFERS` | `2**4` (= 16) | +| `MAX_TRANSFERS` | `0` | ### Signature domains From 39d082260293ca2afb06042ed0087e3026dd7ebc Mon Sep 17 00:00:00 2001 From: Justin Date: Fri, 19 Apr 2019 18:26:54 +1000 Subject: [PATCH 399/481] Sane SSZ object default values (#963) --- specs/core/0_beacon-chain.md | 104 ++----------------------- specs/simple-serialize.md | 5 ++ test_generators/operations/deposits.py | 1 - test_libs/pyspec/tests/helpers.py | 9 +-- test_libs/pyspec/tests/test_sanity.py | 3 - 5 files changed, 14 insertions(+), 108 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index f04a04877..624413879 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -51,7 +51,6 @@ - [`hash`](#hash) - [`hash_tree_root`](#hash_tree_root) - [`signing_root`](#signing_root) - - [`get_temporary_block_header`](#get_temporary_block_header) - [`slot_to_epoch`](#slot_to_epoch) - [`get_previous_epoch`](#get_previous_epoch) - [`get_current_epoch`](#get_current_epoch) @@ -201,13 +200,10 @@ These configurations are updated for releases, but may be out of sync during `de | Name | Value | | - | - | -| `GENESIS_FORK_VERSION` | `int_to_bytes4(0)` | | `GENESIS_SLOT` | `0` | | `GENESIS_EPOCH` | `0` | -| `GENESIS_START_SHARD` | `0` | | `FAR_FUTURE_EPOCH` | `2**64 - 1` | | `ZERO_HASH` | `int_to_bytes32(0)` | -| `EMPTY_SIGNATURE` | `int_to_bytes96(0)` | | `BLS_WITHDRAWAL_PREFIX_BYTE` | `int_to_bytes1(0)` | ### Time parameters @@ -640,23 +636,6 @@ Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethere `def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages. -### `get_temporary_block_header` - -```python -def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader: - """ - Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. - """ - return BeaconBlockHeader( - slot=block.slot, - previous_block_root=block.previous_block_root, - state_root=ZERO_HASH, - block_body_root=hash_tree_root(block.body), - # signing_root(block) is used for block id purposes so signature is a stub - signature=EMPTY_SIGNATURE, - ) -``` - ### `slot_to_epoch` ```python @@ -1345,35 +1324,7 @@ When enough full deposits have been made to the deposit contract, an `Eth2Genesi * `genesis_eth1_data.deposit_count` is the `deposit_count` contained in the `Eth2Genesis` log. * `genesis_eth1_data.block_hash` is the hash of the Ethereum 1.0 block that emitted the `Eth2Genesis` log. * Let `genesis_state = get_genesis_beacon_state(genesis_validator_deposits, genesis_time, genesis_eth1_data)`. -* Let `genesis_block = get_empty_block()`. -* Set `genesis_block.state_root = hash_tree_root(genesis_state)`. - -```python -def get_empty_block() -> BeaconBlock: - """ - Get an empty ``BeaconBlock``. 
- """ - return BeaconBlock( - slot=GENESIS_SLOT, - previous_block_root=ZERO_HASH, - state_root=ZERO_HASH, - body=BeaconBlockBody( - randao_reveal=EMPTY_SIGNATURE, - eth1_data=Eth1Data( - deposit_root=ZERO_HASH, - deposit_count=0, - block_hash=ZERO_HASH, - ), - proposer_slashings=[], - attester_slashings=[], - attestations=[], - deposits=[], - voluntary_exits=[], - transfers=[], - ), - signature=EMPTY_SIGNATURE, - ) -``` +* Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`. ```python def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], @@ -1382,50 +1333,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], """ Get the genesis ``BeaconState``. """ - state = BeaconState( - # Misc - slot=GENESIS_SLOT, - genesis_time=genesis_time, - fork=Fork( - previous_version=GENESIS_FORK_VERSION, - current_version=GENESIS_FORK_VERSION, - epoch=GENESIS_EPOCH, - ), - - # Validator registry - validator_registry=[], - balances=[], - - # Randomness and committees - latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), - latest_start_shard=GENESIS_START_SHARD, - - # Finality - previous_epoch_attestations=[], - current_epoch_attestations=[], - previous_justified_epoch=GENESIS_EPOCH, - current_justified_epoch=GENESIS_EPOCH, - previous_justified_root=ZERO_HASH, - current_justified_root=ZERO_HASH, - justification_bitfield=0, - finalized_epoch=GENESIS_EPOCH, - finalized_root=ZERO_HASH, - - # Recent state - current_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), - previous_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), - latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), - latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), - latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), - latest_slashed_balances=Vector([0 for _ in range(LATEST_SLASHED_EXIT_LENGTH)]), - latest_block_header=get_temporary_block_header(get_empty_block()), - historical_roots=[], - - # Ethereum 1.0 chain data - latest_eth1_data=genesis_eth1_data, - eth1_data_votes=[], - deposit_index=0, - ) + state = BeaconState(genesis_time=genesis_time, latest_eth1_data=genesis_eth1_data) # Process genesis deposits for deposit in genesis_validator_deposits: @@ -1929,7 +1837,11 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the parent matches assert block.previous_block_root == signing_root(state.latest_block_header) # Save current block as the new latest block - state.latest_block_header = get_temporary_block_header(block) + state.latest_block_header = BeaconBlockHeader( + slot=block.slot, + previous_block_root=block.previous_block_root, + block_body_root=hash_tree_root(block.body), + ) # Verify proposer is not slashed proposer = state.validator_registry[get_beacon_proposer_index(state)] assert not proposer.slashed @@ -2132,8 +2044,6 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - slashed=False, - high_balance=0 ) # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. 
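The rewrites above rely on SSZ default values (see the `simple-serialize.md` hunk that follows): omitted fields fall back to their zero values, which is why the new `BeaconBlockHeader(...)` call can leave `state_root` and the signature unset, and why `BeaconState(genesis_time=..., latest_eth1_data=...)` is a valid genesis starting point. A rough sketch of the rule, assuming `bytesN` defaults to a zero-filled byte vector (the added spec text only names `uintN`, `bool`, and lists):

```python
# Sketch of recursive default values for the SSZ types named in this patch.
# The bytesN case is an assumption here (zero-filled fixed-size byte vector).
def default_value(typ):
    if isinstance(typ, str) and typ.startswith('uint'):
        return 0
    if typ == 'bool':
        return False
    if isinstance(typ, str) and typ.startswith('bytes') and len(typ) > 5:
        return b'\x00' * int(typ[5:])
    if isinstance(typ, list) and len(typ) == 1:
        return []  # variable-length list defaults to empty
    raise NotImplementedError("default not sketched for type: %r" % (typ,))


# Hypothetical container description with two of its three fields omitted
fields = {'slot': 'uint64', 'state_root': 'bytes32', 'proposer_slashings': ['ProposerSlashing']}
defaults = {name: default_value(t) for name, t in fields.items() if name != 'slot'}
assert defaults == {'state_root': b'\x00' * 32, 'proposer_slashings': []}
```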
diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 804c66d70..f30d34709 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -9,6 +9,7 @@ This is a **work in progress** describing typing, serialization and Merkleizatio - [Basic types](#basic-types) - [Composite types](#composite-types) - [Aliases](#aliases) + - [Default values](#default-values) - [Serialization](#serialization) - [`"uintN"`](#uintn) - [`"bool"`](#bool) @@ -50,6 +51,10 @@ For convenience we alias: * `"bytes"` to `["byte"]` (this is *not* a basic type) * `"bytesN"` to `["byte", N]` (this is *not* a basic type) +### Default values + +The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists. + ## Serialization We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`. diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py index 85c93f86b..454c6f22d 100644 --- a/test_generators/operations/deposits.py +++ b/test_generators/operations/deposits.py @@ -24,7 +24,6 @@ def build_deposit_data(state, pubkey=pubkey, withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[1:], amount=amount, - proof_of_possession=spec.EMPTY_SIGNATURE, ) deposit_data.proof_of_possession = bls.sign( message_hash=signing_root(deposit_data), diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index e04409792..be43ac6aa 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -9,13 +9,13 @@ import eth2spec.phase0.spec as spec from eth2spec.utils.minimal_ssz import signing_root from eth2spec.phase0.spec import ( # constants - EMPTY_SIGNATURE, ZERO_HASH, # SSZ Attestation, AttestationData, AttestationDataAndCustodyBit, AttesterSlashing, + BeaconBlock, BeaconBlockHeader, Deposit, DepositData, @@ -30,7 +30,6 @@ from eth2spec.phase0.spec import ( get_crosslink_committees_at_slot, get_current_epoch, get_domain, - get_empty_block, get_epoch_start_slot, get_genesis_beacon_state, get_previous_epoch, @@ -115,7 +114,7 @@ def create_genesis_state(num_validators, deposit_data_leaves=None): def build_empty_block_for_next_slot(state): - empty_block = get_empty_block() + empty_block = BeaconBlock() empty_block.slot = state.slot + 1 previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == spec.ZERO_HASH: @@ -130,7 +129,6 @@ def build_deposit_data(state, pubkey, privkey, amount): # insecurely use pubkey as withdrawal key as well withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], amount=amount, - signature=EMPTY_SIGNATURE, ) signature = bls.sign( message_hash=signing_root(deposit_data), @@ -185,7 +183,6 @@ def build_voluntary_exit(state, epoch, validator_index, privkey): voluntary_exit = VoluntaryExit( epoch=epoch, validator_index=validator_index, - signature=EMPTY_SIGNATURE, ) voluntary_exit.signature = bls.sign( message_hash=signing_root(voluntary_exit), @@ -235,7 +232,6 @@ def get_valid_proposer_slashing(state): previous_block_root=ZERO_HASH, state_root=ZERO_HASH, block_body_root=ZERO_HASH, - signature=EMPTY_SIGNATURE, ) header_2 = deepcopy(header_1) header_2.previous_block_root = b'\x02' * 32 @@ -304,7 +300,6 @@ def get_valid_attestation(state, slot=None): aggregation_bitfield=aggregation_bitfield, data=attestation_data, custody_bitfield=custody_bitfield, - 
aggregate_signature=EMPTY_SIGNATURE, ) participants = get_attesting_indices( state, diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index 7ddd4d386..ba9dc6df6 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -8,7 +8,6 @@ import eth2spec.phase0.spec as spec from eth2spec.utils.minimal_ssz import signing_root from eth2spec.phase0.spec import ( # constants - EMPTY_SIGNATURE, ZERO_HASH, # SSZ Deposit, @@ -350,7 +349,6 @@ def test_voluntary_exit(state): voluntary_exit = VoluntaryExit( epoch=get_current_epoch(pre_state), validator_index=validator_index, - signature=EMPTY_SIGNATURE, ) voluntary_exit.signature = bls.sign( message_hash=signing_root(voluntary_exit), @@ -398,7 +396,6 @@ def test_transfer(state): fee=0, slot=pre_state.slot + 1, pubkey=transfer_pubkey, - signature=EMPTY_SIGNATURE, ) transfer.signature = bls.sign( message_hash=signing_root(transfer), From 66cf4e95c149fe156adf46db377a62aa66456283 Mon Sep 17 00:00:00 2001 From: Dmitrii Shmatko Date: Fri, 19 Apr 2019 18:43:26 +0300 Subject: [PATCH 400/481] Added signing_root to ssz_static tests --- test_generators/ssz_static/README.md | 17 +++++++++++++++++ test_generators/ssz_static/main.py | 8 +++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/test_generators/ssz_static/README.md b/test_generators/ssz_static/README.md index 014c71517..01892ecc2 100644 --- a/test_generators/ssz_static/README.md +++ b/test_generators/ssz_static/README.md @@ -2,3 +2,20 @@ The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: the serialization and hashing of ETH 2.0 data types + +#### Test case +Example: +```yaml +- type_name: DepositData + value: {pubkey: '0x364194dbcda9974ec8e57aa0d556ced515e43ce450e21aa8f9b2099a528679fcf45aed142db60b7f848bd399b63f0933', + withdrawal_credentials: '0xad1256c89ae823b24e1d81fae3d3d382d60012d8399f469ff404e3bbf908027a', + amount: 2672254660871140633, signature: '0x5c3fe3bdbf58d0fb4cdb63a19a67082c697ef910c182dc824c8fb048c935b4b46f522c36047ae36feef84654c1e868f3a0edd76852c09e35414782160767439b49aceaa4219cc25016effcc82a9e17b336efee40ab37e3a47fc31da557027491'} + serialized: '0x364194dbcda9974ec8e57aa0d556ced515e43ce450e21aa8f9b2099a528679fcf45aed142db60b7f848bd399b63f0933ad1256c89ae823b24e1d81fae3d3d382d60012d8399f469ff404e3bbf908027a19359bb274c115255c3fe3bdbf58d0fb4cdb63a19a67082c697ef910c182dc824c8fb048c935b4b46f522c36047ae36feef84654c1e868f3a0edd76852c09e35414782160767439b49aceaa4219cc25016effcc82a9e17b336efee40ab37e3a47fc31da557027491' + root: '0x2eaae270579fc1a1eabde69c841221cb3dfab9de7ad99fcfbee8fe0c198878b7' + signing_root: '0x844655facb151b633410ffc698d8467c6488ae87f2d5f739d39c9bfc18750524' +``` +**type_name** - Name of valid Eth2.0 type from the spec +**value** - Field values used to create type instance +**serialized** - [SSZ serialization](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#serialization) of the value +**root** - [hash_tree_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#merkleization) of the value +**signing_root** - (Optional) [signing_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) of the value, if type contains ``signature`` field \ No newline at end of file diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py index 010ca2735..1234294db 100644 --- a/test_generators/ssz_static/main.py +++ 
b/test_generators/ssz_static/main.py @@ -2,7 +2,11 @@ from random import Random from eth2spec.debug import random_value, encode from eth2spec.phase0 import spec -from eth2spec.utils.minimal_ssz import hash_tree_root, serialize +from eth2spec.utils.minimal_ssz import ( + hash_tree_root, + signing_root, + serialize, +) from eth_utils import ( to_tuple, to_dict ) @@ -21,6 +25,8 @@ def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMod yield "value", encode.encode(value, typ) yield "serialized", '0x' + serialize(value).hex() yield "root", '0x' + hash_tree_root(value).hex() + if hasattr(value, "signature"): + yield "signing_root", '0x' + signing_root(value).hex() @to_tuple From 8f9133c8c3b8d3e8665f0a3b93b1454ed7849d97 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 20 Apr 2019 11:33:15 +1000 Subject: [PATCH 401/481] update CI config: caching of repo and venv, and split install from tests run --- .circleci/config.yml | 168 +++++++++++++++++++------------------ Makefile | 16 ++-- test_libs/pyspec/README.md | 5 +- 3 files changed, 103 insertions(+), 86 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5be6ed500..9a7172866 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,89 +1,97 @@ version: 2.1 +commands: + restore_cached_venv: + description: "Restores a cached venv" + parameters: + reqs_checksum: + type: string + default: "1234" + venv_name: + type: string + default: "default-name" + steps: + - restore_cache: + keys: + - << parameters.venv_name >>-venv-<< parameters.reqs_checksum >> + # fallback to using the latest cache if no exact match is found + - << parameters.venv_name >>-venv- + save_cached_venv: + description: "Saves a venv into a cache" + parameters: + reqs_checksum: + type: string + default: "1234" + venv_path: + type: string + default: "venv" + venv_name: + type: string + default: "default-name" + steps: + - save_cache: + key: << parameters.venv_name >>-venv-<< parameters.reqs_checksum >> + paths: << parameters.venv_path >> jobs: - build: + checkout_specs: docker: - image: circleci/python:3.6 - working_directory: ~/repo - + working_directory: ~/specs-repo steps: + # Restore git repo at point close to target branch/revision, to speed up checkout + - restore_cache: + keys: + - v1-specs-repo-{{ .Branch }}-{{ .Revision }} + - v1-specs-repo-{{ .Branch }}- + - v1-specs-repo- - checkout - run: - name: Build pyspec - command: make pyspec - + name: Clean up git repo to reduce cache size + command: git gc + # Save the git checkout as a cache, to make cloning next time faster. 
+ - save_cache: + key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} + paths: + - ~/specs-repo + install_test: + docker: + - image: circleci/python:3.6 + working_directory: ~/specs-repo + steps: + - restore_cache: + key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} + - restore_cached_venv: + venv_name: v1-pyspec + reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}' + - run: + name: Install pyspec requirements + command: make install_test + - save_cached_venv: + venv_name: v1-pyspec + reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}' + venv_path: ./test_libs/pyspec/venv + test: + docker: + - image: circleci/python:3.6 + working_directory: ~/specs-repo + steps: + - restore_cache: + key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} + - restore_cached_venv: + venv_name: v1-pyspec + reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}' - run: name: Run py-tests - command: make test - -# TODO see #928: decide on CI triggering of yaml tests building, -# and destination of output (new yaml tests LFS-configured repository) -# -# - run: -# name: Generate YAML tests -# command: make gen_yaml_tests -# -# - store_artifacts: -# path: test-reports -# destination: test-reports -# -# - run: -# name: Save YAML tests for deployment -# command: | -# mkdir /tmp/workspace -# cp -r yaml_tests /tmp/workspace/ -# git log -1 >> /tmp/workspace/latest_commit_message -# - persist_to_workspace: -# root: /tmp/workspace -# paths: -# - yaml_tests -# - latest_commit_message -# commit: -# docker: -# - image: circleci/python:3.6 -# steps: -# - attach_workspace: -# at: /tmp/workspace -# - add_ssh_keys: -# fingerprints: -# - "01:85:b6:36:96:a6:84:72:e4:9b:4e:38:ee:21:97:fa" -# - run: -# name: Checkout test repository -# command: | -# ssh-keyscan -H github.com >> ~/.ssh/known_hosts -# git clone git@github.com:ethereum/eth2.0-tests.git -# - run: -# name: Commit and push generated YAML tests -# command: | -# cd eth2.0-tests -# git config user.name 'eth2TestGenBot' -# git config user.email '47188154+eth2TestGenBot@users.noreply.github.com' -# for filename in /tmp/workspace/yaml_tests/*; do -# rm -rf $(basename $filename) -# cp -r $filename . -# done -# git add . 
-# if git diff --cached --exit-code >& /dev/null; then -# echo "No changes to commit" -# else -# echo -e "Update generated tests\n\nLatest commit message from eth2.0-specs:\n" > commit_message -# cat /tmp/workspace/latest_commit_message >> commit_message -# git commit -F commit_message -# git push origin master -# fi -#workflows: -# version: 2.1 -# -# build_and_commit: -# jobs: -# - build: -# filters: -# tags: -# only: /.*/ -# - commit: -# requires: -# - build -# filters: -# tags: -# only: /.*/ -# branches: -# ignore: /.*/ \ No newline at end of file + command: make citest + - store_test_results: + path: test_libs/pyspec/test-reports +workflows: + version: 2.1 + test_spec: + jobs: + - checkout_specs + - install_test: + requires: + - checkout_specs + - test: + requires: + - install_test diff --git a/Makefile b/Makefile index b39538791..71d150983 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) -.PHONY: clean all test gen_yaml_tests pyspec phase0 +.PHONY: clean all test citest gen_yaml_tests pyspec phase0 install_test all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) @@ -27,11 +27,17 @@ clean: rm -rf $(PY_SPEC_ALL_TARGETS) # "make gen_yaml_tests" to run generators -gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) +gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) + +# installs the packages to run pyspec tests +install_test: + cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; -# runs a limited set of tests against a minimal config test: $(PY_SPEC_ALL_TARGETS) - cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python -m pytest -m minimal_config . + cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest -m minimal_config . + +citest: $(PY_SPEC_ALL_TARGETS) + cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml -m minimal_config . # "make pyspec" to create the pyspec for all phases. pyspec: $(PY_SPEC_ALL_TARGETS) @@ -69,5 +75,5 @@ $(YAML_TEST_DIR): # For any target within the tests dir, build it using the build_yaml_tests function. # (creation of output dir is a dependency) -$(YAML_TEST_DIR)%: $(YAML_TEST_DIR) +$(YAML_TEST_DIR)%: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(call build_yaml_tests,$*) diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index b3cab11d2..20c01bde4 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -19,6 +19,8 @@ Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2sp ## Py-tests +After building, you can install the dependencies for running the `pyspec` tests with `make install_test` + These tests are not intended for client-consumption. These tests are sanity tests, to verify if the spec itself is consistent. @@ -38,8 +40,9 @@ python3 -m venv venv . venv/bin/activate pip3 install -r requirements.txt ``` -Note: make sure to run `make pyspec` from the root of the specs repository, +Note: make sure to run `make -B pyspec` from the root of the specs repository, to build the parts of the pyspec module derived from the markdown specs. +The `-B` flag may be helpful to force-overwrite the `pyspec` output after you made a change to the markdown source files. 
Run the tests: ``` From 2b171b19c410d4242b8d750708e45111c1935d09 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 20 Apr 2019 12:18:56 +1000 Subject: [PATCH 402/481] fix generator --- test_generators/operations/deposits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py index 454c6f22d..bd9c392a5 100644 --- a/test_generators/operations/deposits.py +++ b/test_generators/operations/deposits.py @@ -29,7 +29,7 @@ def build_deposit_data(state, message_hash=signing_root(deposit_data), privkey=privkey, domain=spec.get_domain( - state.fork, + state, spec.get_current_epoch(state), spec.DOMAIN_DEPOSIT, ) From 55aa12d7bd5bb4a9d8324b4f26fd3c9d3df8b173 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 20 Apr 2019 12:23:10 +1000 Subject: [PATCH 403/481] parallelism support for make gen_yaml_tests --- Makefile | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index b39538791..0a3e03e33 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ clean: rm -rf $(PY_SPEC_ALL_TARGETS) # "make gen_yaml_tests" to run generators -gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) +gen_yaml_tests: $(YAML_TEST_TARGETS) # runs a limited set of tests against a minimal config test: $(PY_SPEC_ALL_TARGETS) @@ -48,24 +48,30 @@ CURRENT_DIR = ${CURDIR} # The function that builds a set of suite files, by calling a generator for the given type (param 1) define build_yaml_tests - $(info running generator $(1)) - # Create the output - mkdir -p $(YAML_TEST_DIR)$(1) - - # 1) Create a virtual environment - # 2) Activate the venv, this is where dependencies are installed for the generator - # 3) Install all the necessary requirements - # 4) Run the generator. The generator is assumed to have an "main.py" file. - # 5) We output to the tests dir (generator program should accept a "-o " argument. - cd $(GENERATOR_DIR)$(1); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR) - - $(info generator $(1) finished) + # Started! + # Create output directory + # Navigate to the generator + # Create a virtual environment, if it does not exist already + # Activate the venv, this is where dependencies are installed for the generator + # Install all the necessary requirements + # Run the generator. The generator is assumed to have an "main.py" file. + # We output to the tests dir (generator program should accept a "-o " argument. + echo "generator $(1) started"; \ + mkdir -p $(YAML_TEST_DIR)$(1); \ + cd $(GENERATOR_DIR)$(1); \ + if test -d venv; then python3 -m venv venv; fi; \ + . venv/bin/activate; \ + pip3 install -r requirements.txt; \ + python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR); \ + echo "generator $(1) finished" endef # The tests dir itself is simply build by creating the directory (recursively creating deeper directories if necessary) $(YAML_TEST_DIR): $(info creating directory, to output yaml targets to: ${YAML_TEST_TARGETS}) mkdir -p $@ +$(YAML_TEST_DIR)/: + $(info ignoring duplicate yaml tests dir) # For any target within the tests dir, build it using the build_yaml_tests function. 
# (creation of output dir is a dependency) From 69ab4140decb7f98a1143dcd4582a2b872de18c9 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 20 Apr 2019 12:25:24 +1000 Subject: [PATCH 404/481] Add note on parallelism --- test_generators/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test_generators/README.md b/test_generators/README.md index 743157aae..66534e5a8 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -28,9 +28,12 @@ make clean This runs all the generators. ```bash -make gen_yaml_tests +make -j 4 gen_yaml_tests ``` +The `-j N` flag makes the generators run in parallel, with `N` being the amount of cores. + + ### Running a single generator The make file auto-detects generators in the `test_generators/` directory, From 14ff452314d566a595ccbdaf2f4cca9fc161c69e Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 20 Apr 2019 12:28:50 +1000 Subject: [PATCH 405/481] move yaml output target --- .gitignore | 2 +- Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index ce047240a..3dd86fc80 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ venv build/ output/ -yaml_tests/ +eth2.0-spec-tests/ .pytest_cache # Dynamically built from Markdown spec diff --git a/Makefile b/Makefile index 0a3e03e33..b93c7ea95 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ SPEC_DIR = ./specs SCRIPT_DIR = ./scripts TEST_LIBS_DIR = ./test_libs PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec -YAML_TEST_DIR = ./yaml_tests +YAML_TEST_DIR = ./eth2.0-spec-tests/tests GENERATOR_DIR = ./test_generators CONFIGS_DIR = ./configs From f908c8d3e085432e8ec1dee8858cb9acff6eccaf Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 15:17:33 +1000 Subject: [PATCH 406/481] Revamped balances and incentivisation --- specs/core/0_beacon-chain.md | 224 ++++++++---------- specs/light_client/sync_protocol.md | 4 +- .../eth2spec/phase0/state_transition.py | 2 - .../test_process_attester_slashing.py | 2 +- .../block_processing/test_process_deposit.py | 2 +- .../test_process_proposer_slashing.py | 2 +- test_libs/pyspec/tests/helpers.py | 2 + test_libs/pyspec/tests/test_sanity.py | 8 +- 8 files changed, 106 insertions(+), 140 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 184811b52..84e9e4230 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -59,8 +59,6 @@ - [`is_active_validator`](#is_active_validator) - [`is_slashable_validator`](#is_slashable_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - - [`get_balance`](#get_balance) - - [`set_balance`](#set_balance) - [`increase_balance`](#increase_balance) - [`decrease_balance`](#decrease_balance) - [`get_permuted_index`](#get_permuted_index) @@ -178,7 +176,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MAX_INDICES_PER_ATTESTATION` | `2**12` (= 4,096) | | `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | | `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | -| `BASE_REWARDS_PER_EPOCH` | `4` | +| `BASE_REWARDS_PER_EPOCH` | `5` | | `SHUFFLE_ROUND_COUNT` | 90 | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. 
(Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) @@ -410,7 +408,7 @@ The types are defined topologically to aid in facilitating an executable version # Was the validator slashed 'slashed': 'bool', # Rounded balance - 'high_balance': 'uint64' + 'effective_balance': 'uint64', } ``` @@ -733,29 +731,19 @@ def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[Valid return [i for i, v in enumerate(state.validator_registry) if is_active_validator(v, epoch)] ``` -### `get_balance` +### `get_next_epoch_effective_balance` ```python -def get_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: +def get_next_epoch_effective_balance(state: BeaconState, index: ValidatorIndex) -> None: """ - Return the balance for a validator with the given ``index``. - """ - return state.balances[index] -``` - -### `set_balance` - -```python -def set_balance(state: BeaconState, index: ValidatorIndex, balance: Gwei) -> None: - """ - Set the balance for a validator with the given ``index`` in both ``BeaconState`` - and validator's rounded balance ``high_balance``. + Get validator effective balance for the next epoch """ + balance = min(state.balances[index], MAX_DEPOSIT_AMOUNT) validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 - if validator.high_balance > balance or validator.high_balance + 3 * HALF_INCREMENT < balance: - validator.high_balance = balance - balance % HIGH_BALANCE_INCREMENT - state.balances[index] = balance + if validator.effective_balance > balance or validator.effective_balance + 3 * HALF_INCREMENT < balance: + return balance - balance % HIGH_BALANCE_INCREMENT + return validator.effective_balance ``` ### `increase_balance` @@ -763,9 +751,9 @@ def set_balance(state: BeaconState, index: ValidatorIndex, balance: Gwei) -> Non ```python def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: """ - Increase the balance for a validator with the given ``index`` by ``delta``. + Increase validator balance by ``delta``. """ - set_balance(state, index, get_balance(state, index) + delta) + state.balances[index] += delta ``` ### `decrease_balance` @@ -773,11 +761,9 @@ def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> ```python def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: """ - Decrease the balance for a validator with the given ``index`` by ``delta``. - Set to ``0`` when underflow. + Decrease validator balance by ``delta`` with underflow protection. """ - current_balance = get_balance(state, index) - set_balance(state, index, current_balance - delta if current_balance >= delta else 0) + state.balances[index] = state.balances[index] - delta if state.balances[index] >= delta else 0 ``` ### `get_permuted_index` @@ -969,18 +955,17 @@ def generate_seed(state: BeaconState, ### `get_beacon_proposer_index` ```python -def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: +def get_beacon_proposer_index(state: BeaconState, slot: Slot=None) -> ValidatorIndex: """ - Return the beacon proposer index at ``state.slot``. + Return the beacon proposer index at ``slot``. 
""" current_epoch = get_current_epoch(state) - - first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0] + first_committee, _ = get_crosslink_committees_at_slot(state, slot if slot != None else state.slot)[0] i = 0 while True: candidate = first_committee[(current_epoch + i) % len(first_committee)] random_byte = hash(generate_seed(state, current_epoch) + int_to_bytes8(i // 32))[i % 32] - if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * random_byte: + if get_effective_balance(state, candidate, current_epoch) * 256 > MAX_DEPOSIT_AMOUNT * random_byte: return candidate i += 1 ``` @@ -1031,21 +1016,21 @@ def bytes_to_int(data: bytes) -> int: ### `get_effective_balance` ```python -def get_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: +def get_effective_balance(state: BeaconState, index: ValidatorIndex, epoch: Epoch) -> Gwei: """ Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. """ - return min(get_balance(state, index), MAX_DEPOSIT_AMOUNT) + return state.validator_registry[index].effective_balance if epoch == get_current_epoch(state) else get_next_epoch_effective_balance(state, index) ``` ### `get_total_balance` ```python -def get_total_balance(state: BeaconState, validators: List[ValidatorIndex]) -> Gwei: +def get_total_balance(state: BeaconState, validators: List[ValidatorIndex], epoch: Epoch) -> Gwei: """ Return the combined effective balance of an array of ``validators``. """ - return sum([get_effective_balance(state, i) for i in validators]) + return sum([get_effective_balance(state, i, epoch) for i in validators]) ``` ### `get_fork_version` @@ -1290,11 +1275,12 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl Slash the validator with index ``slashed_index``. Note that this function mutates ``state``. 
""" + current_epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) state.validator_registry[slashed_index].slashed = True - state.validator_registry[slashed_index].withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH - slashed_balance = get_effective_balance(state, slashed_index) - state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance + state.validator_registry[slashed_index].withdrawable_epoch = current_epoch + LATEST_SLASHED_EXIT_LENGTH + slashed_balance = get_effective_balance(state, slashed_index, current_epoch) + state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: @@ -1447,8 +1433,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], process_deposit(state, deposit) # Process genesis activations - for index in range(len(state.validator_registry)): - if get_effective_balance(state, index) >= MAX_DEPOSIT_AMOUNT: + for index, validator in enumerate(state.validator_registry): + if validator.effective_balance >= MAX_DEPOSIT_AMOUNT: activate_validator(state, index) genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH)) @@ -1520,7 +1506,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) # made for optimized implementations that precompute and save data def get_vote_count(block: BeaconBlock) -> int: return sum( - start_state.validator_registry[validator_index].high_balance + start_state.validator_registry[validator_index].effective_balance for validator_index, target in attestation_targets if get_ancestor(store, target, block.slot) == block ) @@ -1581,38 +1567,27 @@ The steps below happen when `state.slot > GENESIS_SLOT and (state.slot + 1) % SL We define epoch transition helper functions: ```python -def get_previous_epoch_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state, get_previous_epoch(state))) +def get_total_active_balance(state: BeaconState, epoch: Epoch) -> Gwei: + return get_total_balance(state, get_active_validator_indices(state, epoch), epoch) ``` -Note: The balance computed by `get_previous_epoch_total_balance` may be different to the actual total balance during the previous epoch transition. Due to the bounds on per-epoch validator churn and per-epoch rewards/penalties, the maximum balance difference is low and only marginally affects consensus safety. 
- ```python -def get_current_epoch_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state))) +def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]: + return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations ``` - ```python -def get_current_epoch_matching_target_attestations(state: BeaconState) -> List[PendingAttestation]: +def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]: return [ - a for a in state.current_epoch_attestations - if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_current_epoch(state))) + a for a in get_matching_source_attestations(state, epoch) + if a.data.target_root == get_block_root(state, get_epoch_start_slot(epoch)) ] ``` ```python -def get_previous_epoch_matching_target_attestations(state: BeaconState) -> List[PendingAttestation]: +def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]: return [ - a for a in state.previous_epoch_attestations - if a.data.target_root == get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) - ] -``` - -```python -def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[PendingAttestation]: - return [ - a for a in state.previous_epoch_attestations + a for a in get_matching_source_attestations(state, epoch) if a.data.beacon_block_root == get_block_root(state, a.data.slot) ] ``` @@ -1626,8 +1601,8 @@ def get_unslashed_attesting_indices(state: BeaconState, attestations: List[Pendi ``` ```python -def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: - return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) +def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation], epoch: Epoch) -> Gwei: + return get_total_balance(state, get_unslashed_attesting_indices(state, attestations), epoch) ``` ```python @@ -1641,8 +1616,8 @@ def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationDat ```python def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: - pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations - shard_attestations = [a for a in pending_attestations if a.data.shard == shard] + attestations = get_matching_source_attestations(state, epoch) + shard_attestations = [a for a in attestations if a.data.shard == shard] shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations] candidate_crosslinks = [ c for c in shard_crosslinks @@ -1655,7 +1630,7 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch return [a for a in shard_attestations if get_crosslink_from_attestation_data(state, a.data) == crosslink] # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) winning_crosslink = max(candidate_crosslinks, key=lambda crosslink: ( - get_attesting_balance(state, get_attestations_for(crosslink)), crosslink.crosslink_data_root + get_attesting_balance(state, get_attestations_for(crosslink), epoch), crosslink.crosslink_data_root )) return winning_crosslink, get_unslashed_attesting_indices(state, 
get_attestations_for(winning_crosslink)) @@ -1684,13 +1659,15 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.previous_justified_epoch = state.current_justified_epoch state.previous_justified_root = state.current_justified_root state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 - previous_epoch_matching_target_balance = get_attesting_balance(state, get_previous_epoch_matching_target_attestations(state)) - if previous_epoch_matching_target_balance * 3 >= get_previous_epoch_total_balance(state) * 2: + epoch = get_previous_epoch(state) + previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, epoch), epoch) + if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state, epoch) * 2: state.current_justified_epoch = get_previous_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 1) - current_epoch_matching_target_balance = get_attesting_balance(state, get_current_epoch_matching_target_attestations(state)) - if current_epoch_matching_target_balance * 3 >= get_current_epoch_total_balance(state) * 2: + epoch = get_current_epoch(state) + current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, epoch), epoch) + if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state, epoch) * 2: state.current_justified_epoch = get_current_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 0) @@ -1726,9 +1703,10 @@ def process_crosslinks(state: BeaconState) -> None: previous_epoch = get_previous_epoch(state) next_epoch = get_current_epoch(state) + 1 for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): + epoch = slot_to_epoch(slot) for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) - if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee): + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) + if 3 * get_total_balance(state, attesting_indices, epoch) >= 2 * get_total_balance(state, crosslink_committee, epoch): state.current_crosslinks[shard] = winning_crosslink ``` @@ -1737,52 +1715,53 @@ def process_crosslinks(state: BeaconState) -> None: First, we define additional helpers: ```python -def get_base_reward(state: BeaconState, total_balance: Gwei, index: ValidatorIndex) -> Gwei: +def get_base_reward(state: BeaconState, index: ValidatorIndex, epoch: Epoch) -> Gwei: + total_balance = get_total_active_balance(state, epoch) if total_balance == 0: return 0 adjusted_quotient = integer_squareroot(total_balance) // BASE_REWARD_QUOTIENT - return get_effective_balance(state, index) // adjusted_quotient // BASE_REWARDS_PER_EPOCH + return get_effective_balance(state, index, epoch) // adjusted_quotient // BASE_REWARDS_PER_EPOCH ``` ```python -def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - previous_epoch = get_previous_epoch(state) - eligible_validators = [ +def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: + epoch = get_previous_epoch(state) + 
eligible_validator_indices = [ index for index, validator in enumerate(state.validator_registry) - if ( - is_active_validator(validator, previous_epoch) or - (validator.slashed and previous_epoch < validator.withdrawable_epoch) - ) + if is_active_validator(validator, epoch) or (validator.slashed and epoch < validator.withdrawable_epoch) ] rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] - for index in eligible_validators: - base_reward = get_base_reward(state, get_previous_epoch_total_balance(state), index) + for index in eligible_validator_indices: + base_reward = get_base_reward(state, index, epoch) - # Micro-incentives for matching FFG source, matching FFG target, and matching head + # Micro-incentives for attestations matching FFG source, FFG target, and head for attestations in ( - state.previous_epoch_attestations, # Matching FFG source - get_previous_epoch_matching_target_attestations(state), # Matching FFG target - get_previous_epoch_matching_head_attestations(state), # Matching head + get_matching_source_attestations(state, epoch), + get_matching_target_attestations(state, epoch), + get_matching_source_attestations(state, epoch), ): if index in get_unslashed_attesting_indices(state, attestations): - rewards[index] += base_reward * get_attesting_balance(state, attestations) // get_previous_epoch_total_balance(state) + rewards[index] += base_reward * get_attesting_balance(state, attestations, epoch) // get_total_active_balance(state, epoch) else: penalties[index] += base_reward - # Inclusion delay micro-penalty - if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): - earliest_attestation = get_earliest_attestation(state, state.previous_epoch_attestations, index) + if index in get_unslashed_attesting_indices(state, get_matching_source_attestations(state, epoch)): + earliest_attestation = get_earliest_attestation(state, get_matching_source_attestations(state, epoch), index) + # Proposer micro-rewards + proposer_index = get_beacon_proposer_index(state, earliest_attestation.inclusion_slot) + rewards[proposer_index] += base_reward // PROPOSER_REWARD_QUOTIENT + # Inclusion delay micro-rewards inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot - penalties[index] += base_reward * (inclusion_delay - MIN_ATTESTATION_INCLUSION_DELAY) // (SLOTS_PER_EPOCH - MIN_ATTESTATION_INCLUSION_DELAY) + rewards[index] += base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay # Inactivity penalty - epochs_since_finality = previous_epoch - state.finalized_epoch - if epochs_since_finality > MIN_EPOCHS_TO_INACTIVITY_PENALTY: + finality_delay = epoch - state.finalized_epoch + if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY: penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward - if index not in get_unslashed_attesting_indices(state, get_previous_epoch_matching_target_attestations(state)): - penalties[index] += get_effective_balance(state, index) * epochs_since_finality // INACTIVITY_PENALTY_QUOTIENT + if index not in get_unslashed_attesting_indices(state, get_matching_target_attestations(state, epoch)): + penalties[index] += get_effective_balance(state, index, epoch) * finality_delay // INACTIVITY_PENALTY_QUOTIENT return [rewards, penalties] ``` @@ -1792,12 +1771,13 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in 
range(len(state.validator_registry))] for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state))): + epoch = slot_to_epoch(slot) for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) - attesting_balance = get_total_balance(state, attesting_indices) - committee_balance = get_total_balance(state, crosslink_committee) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) + attesting_balance = get_total_balance(state, attesting_indices, epoch) + committee_balance = get_total_balance(state, crosslink_committee, epoch) for index in crosslink_committee: - base_reward = get_base_reward(state, get_previous_epoch_total_balance(state), index) + base_reward = get_base_reward(state, index, epoch) if index in attesting_indices: rewards[index] += base_reward * attesting_balance // committee_balance else: @@ -1812,7 +1792,7 @@ def process_rewards_and_penalties(state: BeaconState) -> None: if get_current_epoch(state) == GENESIS_EPOCH: return - rewards1, penalties1 = get_justification_and_finalization_deltas(state) + rewards1, penalties1 = get_attestation_deltas(state) rewards2, penalties2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): increase_balance(state, i, rewards1[i] + rewards2[i]) @@ -1827,11 +1807,10 @@ Run the following function: def process_registry_updates(state: BeaconState) -> None: # Process activation eligibility and ejections for index, validator in enumerate(state.validator_registry): - balance = get_balance(state, index) - if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: + if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance >= MAX_DEPOSIT_AMOUNT: validator.activation_eligibility_epoch = get_current_epoch(state) - if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: + if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance < EJECTION_BALANCE: initiate_validator_exit(state, index) # Process activations @@ -1852,7 +1831,7 @@ Run the following function: def process_slashings(state: BeaconState) -> None: current_epoch = get_current_epoch(state) active_validator_indices = get_active_validator_indices(state, current_epoch) - total_balance = get_total_balance(state, active_validator_indices) + total_balance = get_total_balance(state, active_validator_indices, current_epoch) # Compute `total_penalties` total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] @@ -1862,8 +1841,8 @@ def process_slashings(state: BeaconState) -> None: for index, validator in enumerate(state.validator_registry): if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2: penalty = max( - get_effective_balance(state, index) * min(total_penalties * 3, total_balance) // total_balance, - get_effective_balance(state, index) // MIN_PENALTY_QUOTIENT + get_effective_balance(state, index, current_epoch) * min(total_penalties * 3, total_balance) // total_balance, + get_effective_balance(state, index, current_epoch) // MIN_PENALTY_QUOTIENT ) decrease_balance(state, index, penalty) ``` @@ -1879,6 +1858,9 @@ def process_final_updates(state: BeaconState) -> None: # Reset eth1 data votes if state.slot % 
SLOTS_PER_ETH1_VOTING_PERIOD == 0: state.eth1_data_votes = [] + # Update effective balances + for index, validator in enumerate(state.validator_registry): + validator.effective_balance = get_next_epoch_effective_balance(state, index) # Update start shard state.latest_start_shard = (state.latest_start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT # Set active index root @@ -2079,18 +2061,6 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: state.previous_epoch_attestations.append(pending_attestation) ``` -Run `process_proposer_attestation_rewards(state)`. - -```python -def process_proposer_attestation_rewards(state: BeaconState) -> None: - proposer_index = get_beacon_proposer_index(state) - for pending_attestations in (state.previous_epoch_attestations, state.current_epoch_attestations): - for index in get_unslashed_attesting_indices(state, pending_attestations): - if get_earliest_attestation(state, pending_attestations, index).inclusion_slot == state.slot: - base_reward = get_base_reward(state, get_current_epoch_total_balance(state), index) - increase_balance(state, proposer_index, base_reward // PROPOSER_REWARD_QUOTIENT) -``` - ##### Deposits Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)`. @@ -2153,13 +2123,11 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, slashed=False, - high_balance=0 + effective_balance=amount - amount % HIGH_BALANCE_INCREMENT, ) - # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. state.validator_registry.append(validator) - state.balances.append(0) - set_balance(state, len(state.validator_registry) - 1, amount) + state.balances.append(amount) else: # Increase balance by deposit amount index = validator_pubkeys.index(pubkey) @@ -2212,8 +2180,8 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: Process ``Transfer`` operation. Note that this function mutates ``state``. 
""" - # Verify the amount and fee aren't individually too big (for anti-overflow purposes) - assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) + # Verify the amount and fee are not individually too big (for anti-overflow purposes) + assert state.balances[transfer.sender] >= max(transfer.amount, transfer.fee) # A transfer is valid in only one slot assert state.slot == transfer.slot # Only withdrawn or not-yet-deposited accounts can transfer @@ -2238,8 +2206,8 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: increase_balance(state, transfer.recipient, transfer.amount) increase_balance(state, get_beacon_proposer_index(state), transfer.fee) # Verify balances are not dust - assert not (0 < get_balance(state, transfer.sender) < MIN_DEPOSIT_AMOUNT) - assert not (0 < get_balance(state, transfer.recipient) < MIN_DEPOSIT_AMOUNT) + assert not (0 < state.balances[transfer.sender] < MIN_DEPOSIT_AMOUNT) + assert not (0 < state.balances[transfer.recipient] < MIN_DEPOSIT_AMOUNT) ``` #### State root verification diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 900b2e64f..7b8388583 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -180,8 +180,8 @@ def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: Val assert proof.shard_parent_block.beacon_chain_root == hash_tree_root(proof.header) committee = compute_committee(proof.header, validator_memory) # Verify that we have >=50% support - support_balance = sum([v.high_balance for i, v in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True]) - total_balance = sum([v.high_balance for i, v in enumerate(committee)]) + support_balance = sum([v.effective_balance for i, v in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True]) + total_balance = sum([v.effective_balance for i, v in enumerate(committee)]) assert support_balance * 2 > total_balance # Verify shard attestations group_public_key = bls_aggregate_pubkeys([ diff --git a/test_libs/pyspec/eth2spec/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py index 38ecd2a02..1bef358d4 100644 --- a/test_libs/pyspec/eth2spec/phase0/state_transition.py +++ b/test_libs/pyspec/eth2spec/phase0/state_transition.py @@ -11,7 +11,6 @@ from .spec import ( BeaconState, BeaconBlock, Slot, - process_proposer_attestation_rewards, ) @@ -52,7 +51,6 @@ def process_operations(state: BeaconState, block: BeaconBlock) -> None: spec.MAX_ATTESTATIONS, spec.process_attestation, ) - process_proposer_attestation_rewards(state) assert len(block.body.deposits) == expected_deposit_count(state) process_operation_type( diff --git a/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py index 84c19145a..bcaf6fb7a 100644 --- a/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py +++ b/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py @@ -3,11 +3,11 @@ import pytest import eth2spec.phase0.spec as spec from eth2spec.phase0.spec import ( - get_balance, get_beacon_proposer_index, process_attester_slashing, ) from tests.helpers import ( + get_balance, get_valid_attester_slashing, next_epoch, ) diff --git a/test_libs/pyspec/tests/block_processing/test_process_deposit.py b/test_libs/pyspec/tests/block_processing/test_process_deposit.py index 4031e650d..4fb8b3a1e 100644 --- 
a/test_libs/pyspec/tests/block_processing/test_process_deposit.py +++ b/test_libs/pyspec/tests/block_processing/test_process_deposit.py @@ -4,11 +4,11 @@ import pytest import eth2spec.phase0.spec as spec from eth2spec.phase0.spec import ( - get_balance, ZERO_HASH, process_deposit, ) from tests.helpers import ( + get_balance, build_deposit, privkeys, pubkeys, diff --git a/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py index 6d5f3045d..475221036 100644 --- a/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py +++ b/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py @@ -3,11 +3,11 @@ import pytest import eth2spec.phase0.spec as spec from eth2spec.phase0.spec import ( - get_balance, get_current_epoch, process_proposer_slashing, ) from tests.helpers import ( + get_balance, get_valid_proposer_slashing, ) diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 44d2dcb4d..616f3b797 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -51,6 +51,8 @@ privkeys = [i + 1 for i in range(1000)] pubkeys = [bls.privtopub(privkey) for privkey in privkeys] pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} +def get_balance(state, index): + return state.balances[index] def set_bitfield_bit(bitfield, i): """ diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index 29333a7ad..508b07905 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -16,7 +16,6 @@ from eth2spec.phase0.spec import ( VoluntaryExit, # functions get_active_validator_indices, - get_balance, get_beacon_proposer_index, get_block_root, get_current_epoch, @@ -24,7 +23,6 @@ from eth2spec.phase0.spec import ( get_state_root, advance_slot, cache_state, - set_balance, slot_to_epoch, verify_merkle_branch, hash, @@ -38,6 +36,7 @@ from eth2spec.utils.merkle_minimal import ( get_merkle_root, ) from .helpers import ( + get_balance, build_deposit_data, build_empty_block_for_next_slot, fill_aggregate_attestation, @@ -53,7 +52,6 @@ from .helpers import ( # mark entire file as 'sanity' pytestmark = pytest.mark.sanity - def check_finality(state, prev_state, current_justified_changed, @@ -304,6 +302,7 @@ def test_deposit_top_up(state): def test_attestation(state): + state.slot = spec.SLOTS_PER_EPOCH test_state = deepcopy(state) attestation = get_valid_attestation(state) @@ -318,7 +317,6 @@ def test_attestation(state): assert len(test_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1 proposer_index = get_beacon_proposer_index(test_state) - assert test_state.balances[proposer_index] > state.balances[proposer_index] # # Epoch transition should move to previous_epoch_attestations @@ -443,7 +441,7 @@ def test_balance_driven_status_transitions(state): assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH # set validator balance to below ejection threshold - set_balance(pre_state, validator_index, spec.EJECTION_BALANCE - 1) + pre_state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE - 1 post_state = deepcopy(pre_state) # From 8c59bfd9be703a6b31c969e46a2a26d4c0c8b51f Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Sat, 20 Apr 2019 00:18:14 -0500 Subject: [PATCH 407/481] Update simple-serialize.md (#969) --- 
specs/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index f30d34709..6ccb8f22d 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -34,11 +34,11 @@ This is a **work in progress** describing typing, serialization and Merkleizatio ### Composite types -* **container**: ordered heterogenous collection of values +* **container**: ordered heterogeneous collection of values * key-pair curly bracket notation `{}`, e.g. `{"foo": "uint64", "bar": "bool"}` * **vector**: ordered fixed-length homogeneous collection of values * angle bracket notation `[type, N]`, e.g. `["uint64", N]` -* **list**: ordered variable-length homogenous collection of values +* **list**: ordered variable-length homogeneous collection of values * angle bracket notation `[type]`, e.g. `["uint64"]` We recursively define "variable-size" types to be lists and all types that contains a variable-size type. All other types are said to be "fixed-size". From 4d26ae255a1c9127fe4cc9dc3ed2ac830b1bf1f3 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 15:31:15 +1000 Subject: [PATCH 408/481] Bug fix --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 84e9e4230..ba17691bc 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -731,10 +731,10 @@ def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[Valid return [i for i, v in enumerate(state.validator_registry) if is_active_validator(v, epoch)] ``` -### `get_next_epoch_effective_balance` +### `get_current_epoch_effective_balance` ```python -def get_next_epoch_effective_balance(state: BeaconState, index: ValidatorIndex) -> None: +def get_current_epoch_effective_balance(state: BeaconState, index: ValidatorIndex) -> None: """ Get validator effective balance for the next epoch """ @@ -1020,7 +1020,7 @@ def get_effective_balance(state: BeaconState, index: ValidatorIndex, epoch: Epoc """ Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. 
""" - return state.validator_registry[index].effective_balance if epoch == get_current_epoch(state) else get_next_epoch_effective_balance(state, index) + return get_current_epoch_effective_balance(state, index) if epoch == get_current_epoch(state) else state.validator_registry[index].effective_balance ``` ### `get_total_balance` @@ -1860,7 +1860,7 @@ def process_final_updates(state: BeaconState) -> None: state.eth1_data_votes = [] # Update effective balances for index, validator in enumerate(state.validator_registry): - validator.effective_balance = get_next_epoch_effective_balance(state, index) + validator.effective_balance = get_current_epoch_effective_balance(state, index) # Update start shard state.latest_start_shard = (state.latest_start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT # Set active index root From f07b94e77c0c2453050905ea9cf8e51ada34ff2c Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 15:37:12 +1000 Subject: [PATCH 409/481] Fixes --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 27e3b8663..e5ef8af1b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -937,13 +937,13 @@ def get_beacon_proposer_index(state: BeaconState, slot: Slot=None) -> ValidatorI """ Return the beacon proposer index at ``slot``. """ - current_epoch = get_current_epoch(state) + epoch = slot_to_epoch(slot if slot != None else state.slot) first_committee, _ = get_crosslink_committees_at_slot(state, slot if slot != None else state.slot)[0] i = 0 while True: - candidate = first_committee[(current_epoch + i) % len(first_committee)] - random_byte = hash(generate_seed(state, current_epoch) + int_to_bytes8(i // 32))[i % 32] - if get_effective_balance(state, candidate, current_epoch) * 256 > MAX_DEPOSIT_AMOUNT * random_byte: + candidate = first_committee[(epoch + i) % len(first_committee)] + random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32] + if get_effective_balance(state, candidate, epoch) * 256 > MAX_DEPOSIT_AMOUNT * random_byte: return candidate i += 1 ``` From 1a95996035bf59a91678a3c23c683370a1d3e72f Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Sat, 20 Apr 2019 01:01:06 -0500 Subject: [PATCH 410/481] i.e. + e.g. standardization (#970) --- specs/bls_signature.md | 2 +- specs/light_client/merkle_proofs.md | 2 +- specs/light_client/sync_protocol.md | 2 +- specs/validator/0_beacon-chain-validator.md | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/bls_signature.md b/specs/bls_signature.md index 14a4f1cb7..beef19df5 100644 --- a/specs/bls_signature.md +++ b/specs/bls_signature.md @@ -88,7 +88,7 @@ def hash_to_G2(message_hash: Bytes32, domain: uint64) -> [uint384]: `modular_squareroot(x)` returns a solution `y` to `y**2 % q == x`, and `None` if none exists. If there are two solutions the one with higher imaginary component is favored; if both solutions have equal imaginary component the one with higher real component is favored (note that this is equivalent to saying that the single solution with either imaginary component > p/2 or imaginary component zero and real component > p/2 is favored). -The following is a sample implementation; implementers are free to implement modular square roots as they wish. Note that `x2 = -x1` is an _additive modular inverse_ so real and imaginary coefficients remain in `[0 .. q-1]`. 
`coerce_to_int(element: Fq) -> int` is a function that takes Fq element `element` (ie. integers `mod q`) and converts it to a regular integer. +The following is a sample implementation; implementers are free to implement modular square roots as they wish. Note that `x2 = -x1` is an _additive modular inverse_ so real and imaginary coefficients remain in `[0 .. q-1]`. `coerce_to_int(element: Fq) -> int` is a function that takes Fq element `element` (i.e. integers `mod q`) and converts it to a regular integer. ```python Fq2_order = q ** 2 - 1 diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index 63c018f2f..b38167bb5 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -102,7 +102,7 @@ def get_generalized_indices(obj: Any, path: List[int], root: int=1) -> List[int] ## Merkle multiproofs -We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (ie. generalized indices 8, 9, 14): +We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (i.e. generalized indices 8, 9, 14): ``` . diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md index 900b2e64f..257590f4d 100644 --- a/specs/light_client/sync_protocol.md +++ b/specs/light_client/sync_protocol.md @@ -27,7 +27,7 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers ### Expansions -We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (eg. `BeaconBlockHeader` is a summary of `BeaconBlock`). +We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (e.g. `BeaconBlockHeader` is a summary of `BeaconBlock`). 
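To make the root-preservation property concrete, here is a toy demonstration that deliberately does not use the real SSZ merkleization rules: `h` below is just SHA-256 over concatenated bytes, standing in for the fact that a container's root is computed from the roots of its fields, so a field stored as a child's `hash_tree_root` and a field stored as the child itself contribute identically to the parent's root.

```python
# Toy illustration only (not SSZ): summary and expansion share the same root.
from hashlib import sha256

def h(*parts: bytes) -> bytes:
    return sha256(b''.join(parts)).digest()

child_root = h(b'child-object')                          # root of the nested object
summary_root = h(b'other-fields', child_root)            # summary: field stored as a root
expansion_root = h(b'other-fields', h(b'child-object'))  # expansion: object hashed during merkleization
assert summary_root == expansion_root
```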
We define two expansions: diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 632bf2b62..cb19097dd 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -60,7 +60,7 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers ## Introduction -This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (ie. the functionality of following and reading the beacon chain) and a "validator client" (ie. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope. +This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope. A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof of work networks in which a miner provides collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol. @@ -141,7 +141,7 @@ A validator has two primary responsibilities to the beacon chain -- [proposing b A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function). -There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (eg. at 312500 validators = 10 million ETH, that's once per ~3 weeks). +There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks). 
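The "once per ~3 weeks" figure can be sanity-checked with back-of-the-envelope arithmetic; the sketch below assumes six-second slots (`SECONDS_PER_SLOT = 6`), which reproduces the quoted estimate.

```python
# Expected time between proposals for one validator out of 312,500,
# assuming SECONDS_PER_SLOT = 6 (one proposer per slot).
SECONDS_PER_SLOT = 6
active_validators = 312500
seconds_between_proposals = active_validators * SECONDS_PER_SLOT
print(seconds_between_proposals / 86400)  # ~21.7 days, i.e. roughly three weeks
```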
#### Block header From d700ea44066ce3efb47f58e158bc184510f266ab Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 16:10:25 +1000 Subject: [PATCH 411/481] Fixes --- specs/core/0_beacon-chain.md | 50 ++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index e5ef8af1b..c1dd75fb0 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -58,6 +58,7 @@ - [`is_active_validator`](#is_active_validator) - [`is_slashable_validator`](#is_slashable_validator) - [`get_active_validator_indices`](#get_active_validator_indices) + - [`get_current_epoch_effective_balance`](#get_current_epoch_effective_balance) - [`increase_balance`](#increase_balance) - [`decrease_balance`](#decrease_balance) - [`get_permuted_index`](#get_permuted_index) @@ -712,7 +713,7 @@ def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[Valid ### `get_current_epoch_effective_balance` ```python -def get_current_epoch_effective_balance(state: BeaconState, index: ValidatorIndex) -> None: +def get_current_epoch_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: """ Get validator effective balance for the next epoch """ @@ -1008,7 +1009,7 @@ def get_total_balance(state: BeaconState, validators: List[ValidatorIndex], epoc """ Return the combined effective balance of an array of ``validators``. """ - return sum([get_effective_balance(state, i, epoch) for i in validators]) + return sum([get_effective_balance(state, index, epoch) for index in validators]) ``` ### `get_domain` @@ -1547,6 +1548,8 @@ def process_justification_and_finalization(state: BeaconState) -> None: if get_current_epoch(state) <= GENESIS_EPOCH + 1: return + previous_epoch = get_previous_epoch(state) + current_epoch = get_current_epoch(state) old_previous_justified_epoch = state.previous_justified_epoch old_current_justified_epoch = state.current_justified_epoch @@ -1554,22 +1557,19 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.previous_justified_epoch = state.current_justified_epoch state.previous_justified_root = state.current_justified_root state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 - epoch = get_previous_epoch(state) - previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, epoch), epoch) - if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state, epoch) * 2: + previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, previous_epoch), previous_epoch) + if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state, previous_epoch) * 2: state.current_justified_epoch = get_previous_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 1) - epoch = get_current_epoch(state) - current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, epoch), epoch) - if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state, epoch) * 2: + current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, current_epoch), current_epoch) + if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state, current_epoch) * 2: state.current_justified_epoch = 
get_current_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 0) # Process finalizations bitfield = state.justification_bitfield - current_epoch = get_current_epoch(state) # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3: state.finalized_epoch = old_previous_justified_epoch @@ -1611,39 +1611,39 @@ First, we define additional helpers: ```python def get_base_reward(state: BeaconState, index: ValidatorIndex, epoch: Epoch) -> Gwei: - total_balance = get_total_active_balance(state, epoch) - if total_balance == 0: + adjusted_quotient = integer_squareroot(get_total_active_balance(state, epoch)) // BASE_REWARD_QUOTIENT + if adjusted_quotient == 0: return 0 - - adjusted_quotient = integer_squareroot(total_balance) // BASE_REWARD_QUOTIENT return get_effective_balance(state, index, epoch) // adjusted_quotient // BASE_REWARDS_PER_EPOCH + ``` ```python def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: - epoch = get_previous_epoch(state) + previous_epoch = get_previous_epoch(state) + total_balance = get_total_active_balance(state, previous_epoch) eligible_validator_indices = [ index for index, validator in enumerate(state.validator_registry) - if is_active_validator(validator, epoch) or (validator.slashed and epoch < validator.withdrawable_epoch) + if is_active_validator(validator, previous_epoch) or (validator.slashed and previous_epoch < validator.withdrawable_epoch) ] rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] for index in eligible_validator_indices: - base_reward = get_base_reward(state, index, epoch) + base_reward = get_base_reward(state, index, previous_epoch) # Micro-incentives for attestations matching FFG source, FFG target, and head for attestations in ( - get_matching_source_attestations(state, epoch), - get_matching_target_attestations(state, epoch), - get_matching_source_attestations(state, epoch), + get_matching_source_attestations(state, previous_epoch), + get_matching_target_attestations(state, previous_epoch), + get_matching_source_attestations(state, previous_epoch), ): if index in get_unslashed_attesting_indices(state, attestations): - rewards[index] += base_reward * get_attesting_balance(state, attestations, epoch) // get_total_active_balance(state, epoch) + rewards[index] += base_reward * get_attesting_balance(state, attestations, previous_epoch) // total_balance else: penalties[index] += base_reward - if index in get_unslashed_attesting_indices(state, get_matching_source_attestations(state, epoch)): - earliest_attestation = get_earliest_attestation(state, get_matching_source_attestations(state, epoch), index) + if index in get_unslashed_attesting_indices(state, get_matching_source_attestations(state, previous_epoch)): + earliest_attestation = get_earliest_attestation(state, get_matching_source_attestations(state, previous_epoch), index) # Proposer micro-rewards proposer_index = get_beacon_proposer_index(state, earliest_attestation.inclusion_slot) rewards[proposer_index] += base_reward // PROPOSER_REWARD_QUOTIENT @@ -1652,11 +1652,11 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: rewards[index] += base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay # Inactivity penalty - finality_delay = epoch 
- state.finalized_epoch + finality_delay = previous_epoch - state.finalized_epoch if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY: penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward - if index not in get_unslashed_attesting_indices(state, get_matching_target_attestations(state, epoch)): - penalties[index] += get_effective_balance(state, index, epoch) * finality_delay // INACTIVITY_PENALTY_QUOTIENT + if index not in get_unslashed_attesting_indices(state, get_matching_target_attestations(state, previous_epoch)): + penalties[index] += get_effective_balance(state, index, previous_epoch) * finality_delay // INACTIVITY_PENALTY_QUOTIENT return [rewards, penalties] ``` From 06f475a844e33d4ef13b1e4097c62b20b752bc88 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 16:32:41 +1000 Subject: [PATCH 412/481] Fixes --- specs/core/0_beacon-chain.md | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c1dd75fb0..b6b4bb13e 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -715,12 +715,12 @@ def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[Valid ```python def get_current_epoch_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: """ - Get validator effective balance for the next epoch + Get validator effective balance for the current epoch """ balance = min(state.balances[index], MAX_DEPOSIT_AMOUNT) validator = state.validator_registry[index] HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 - if validator.effective_balance > balance or validator.effective_balance + 3 * HALF_INCREMENT < balance: + if state.slot == GENESIS_SLOT or (validator.effective_balance > balance or validator.effective_balance + 3 * HALF_INCREMENT < balance): return balance - balance % HIGH_BALANCE_INCREMENT return validator.effective_balance ``` @@ -1705,7 +1705,7 @@ def process_registry_updates(state: BeaconState) -> None: if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance >= MAX_DEPOSIT_AMOUNT: validator.activation_eligibility_epoch = get_current_epoch(state) - if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance < EJECTION_BALANCE: + if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE: initiate_validator_exit(state, index) # Process activations @@ -1956,30 +1956,21 @@ For each `deposit` in `block.body.deposits`, run the following function: ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: """ - Process a deposit from Ethereum 1.0. - Used to add a validator or top up an existing validator's - balance by some ``deposit`` amount. - + Process an Eth1 deposit, registering a validator or increasing its balance. Note that this function mutates ``state``. """ # Deposits must be processed in order assert deposit.index == state.deposit_index + state.deposit_index += 1 # Verify the Merkle branch - merkle_branch_is_valid = verify_merkle_branch( + assert verify_merkle_branch( leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization proof=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH, index=deposit.index, root=state.latest_eth1_data.deposit_root, ) - assert merkle_branch_is_valid - - # Increment the next deposit index we are expecting. 
Note that this - # needs to be done here because while the deposit contract will never - # create an invalid Merkle branch, it may admit an invalid deposit - # object, and we need to be able to skip over it - state.deposit_index += 1 validator_pubkeys = [v.pubkey for v in state.validator_registry] pubkey = deposit.data.pubkey @@ -1998,11 +1989,11 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - effective_balance=amount - amount % HIGH_BALANCE_INCREMENT, ) state.validator_registry.append(validator) state.balances.append(amount) + validator.effective_balance = get_current_epoch_effective_balance(state, len(state.validator_registry) - 1) else: # Increase balance by deposit amount index = validator_pubkeys.index(pubkey) From e184f0b3fe3a660e9a31a37eed146e73aa65f040 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 16:35:02 +1000 Subject: [PATCH 413/481] Fix --- specs/core/0_beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index b6b4bb13e..be0fb5d5a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1959,10 +1959,6 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: Process an Eth1 deposit, registering a validator or increasing its balance. Note that this function mutates ``state``. """ - # Deposits must be processed in order - assert deposit.index == state.deposit_index - state.deposit_index += 1 - # Verify the Merkle branch assert verify_merkle_branch( leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization @@ -1972,6 +1968,10 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: root=state.latest_eth1_data.deposit_root, ) + # Deposits must be processed in order + assert deposit.index == state.deposit_index + state.deposit_index += 1 + validator_pubkeys = [v.pubkey for v in state.validator_registry] pubkey = deposit.data.pubkey amount = deposit.data.amount From 7642abf1143a3be3cd74d9532a13d41440ca7f31 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 16:36:34 +1000 Subject: [PATCH 414/481] Fix| --- specs/core/0_beacon-chain.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index be0fb5d5a..a3a9aff9d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1972,26 +1972,25 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: assert deposit.index == state.deposit_index state.deposit_index += 1 - validator_pubkeys = [v.pubkey for v in state.validator_registry] pubkey = deposit.data.pubkey amount = deposit.data.amount - + validator_pubkeys = [v.pubkey for v in state.validator_registry] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)): return # Add new validator - validator = Validator( + state.validator_registry.append(Validator( pubkey=pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - ) + )) - state.validator_registry.append(validator) + # Add initial balance state.balances.append(amount) 
validator.effective_balance = get_current_epoch_effective_balance(state, len(state.validator_registry) - 1) else: From a2a737b7289ea4bcbd9f77ca723cb197f1387828 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 20 Apr 2019 01:45:18 -0500 Subject: [PATCH 415/481] Signal non-final status of base reward and desired issuance goal --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 624413879..c0f03e4ed 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -241,7 +241,7 @@ These configurations are updated for releases, but may be out of sync during `de | `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) | | `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | -* The `BASE_REWARD_QUOTIENT` parameter dictates the per-epoch reward. It corresponds to ~2.54% annual interest assuming 10 million participating ETH in every epoch. +* **The `BASE_REWARD_QUOTIENT` is NOT final. Once all other protocol details are finalized it will be adjusted, to target a theoretical maximum total issuance of `2**21` ETH per year if `2**27` ETH is validating (and therefore `2**20` per year if `2**25` ETH is validating, etc etc)** * The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. 
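The `1/sqrt(e)` retention estimate can be reproduced numerically from the constants quoted above; a minimal check:

```python
# Numeric check of the balance-retention formula for an offline validator.
INVERSE_SQRT_E_DROP_TIME = 2**12                            # epochs (~18 days)
INACTIVITY_PENALTY_QUOTIENT = INVERSE_SQRT_E_DROP_TIME**2   # = 2**24
n = INVERSE_SQRT_E_DROP_TIME
retained = (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (n**2 / 2)
print(retained)  # ~0.6065, close to 1/sqrt(e) ~= 0.6065
```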
### Max operations per block From d6644edcc9e5f25962ada04f120fbe0d63698e3d Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Sat, 20 Apr 2019 17:12:40 +1000 Subject: [PATCH 416/481] Fix test --- specs/core/0_beacon-chain.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index a3a9aff9d..32f24b2e3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1981,14 +1981,15 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: return # Add new validator - state.validator_registry.append(Validator( + validator = Validator( pubkey=pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - )) + ) + state.validator_registry.append(validator) # Add initial balance state.balances.append(amount) From 75fae6f311f27ce0977f6cb361ec097c92d9b817 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Sat, 20 Apr 2019 18:13:45 +1000 Subject: [PATCH 417/481] Change sorted[-1] to max() (#972) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 624413879..9da1ba25c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1236,7 +1236,7 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: # Compute exit queue epoch exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH] - exit_queue_epoch = sorted(exit_epochs + [get_delayed_activation_exit_epoch(get_current_epoch(state))])[-1] + exit_queue_epoch = max(exit_epochs + [get_delayed_activation_exit_epoch(get_current_epoch(state))]) exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch]) if exit_queue_churn >= get_churn_limit(state): exit_queue_epoch += 1 From 08d921a6c9338ac901a53da1cf7b59da8b7f2990 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Sat, 20 Apr 2019 22:48:02 -0700 Subject: [PATCH 418/481] Make crosslink_data_root comment more explicit (#973) --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 9da1ba25c..0ae4c1160 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -295,7 +295,7 @@ The types are defined topologically to aid in facilitating an executable version 'epoch': 'uint64', # Root of the previous crosslink 'previous_crosslink_root': 'bytes32', - # Shard data since the previous crosslink + # Root of the crosslinked shard data since the previous crosslink 'crosslink_data_root': 'bytes32', } ``` From 04d498695e680e2c4c7a8b575bdd73e4f0265f85 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 22 Apr 2019 14:01:04 +1000 Subject: [PATCH 419/481] update test format docs --- specs/test_formats/ssz_static/core.md | 9 +++++++++ test_generators/ssz_static/README.md | 17 +---------------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/specs/test_formats/ssz_static/core.md b/specs/test_formats/ssz_static/core.md index 8a5067f03..ee712a830 100644 --- a/specs/test_formats/ssz_static/core.md +++ b/specs/test_formats/ssz_static/core.md @@ -13,6 +13,7 @@ type_name: string -- string, object name, formatted as in spec. E.g. 
"BeaconBlo value: dynamic -- the YAML-encoded value, of the type specified by type_name. serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x +signing_root: bytes32 -- string, signing-root of the value, hex encoded, with prefix 0x. Optional, present if type contains ``signature`` field ``` ## Condition @@ -20,4 +21,12 @@ root: bytes32 -- string, hash-tree-root of the value, hex encoded, with pre A test-runner can implement the following assertions: - Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized` - Hash-tree-root: After parsing the `value`, Hash-tree-root it: the output should match `root` + - Optionally also check signing-root, if present. - Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value` + +## References + + +**`serialized`**: [SSZ serialization](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#serialization) +**`root`** - [hash_tree_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#merkleization) +**`signing_root`** - [signing_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) diff --git a/test_generators/ssz_static/README.md b/test_generators/ssz_static/README.md index 01892ecc2..d73556e1b 100644 --- a/test_generators/ssz_static/README.md +++ b/test_generators/ssz_static/README.md @@ -3,19 +3,4 @@ The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: the serialization and hashing of ETH 2.0 data types -#### Test case -Example: -```yaml -- type_name: DepositData - value: {pubkey: '0x364194dbcda9974ec8e57aa0d556ced515e43ce450e21aa8f9b2099a528679fcf45aed142db60b7f848bd399b63f0933', - withdrawal_credentials: '0xad1256c89ae823b24e1d81fae3d3d382d60012d8399f469ff404e3bbf908027a', - amount: 2672254660871140633, signature: '0x5c3fe3bdbf58d0fb4cdb63a19a67082c697ef910c182dc824c8fb048c935b4b46f522c36047ae36feef84654c1e868f3a0edd76852c09e35414782160767439b49aceaa4219cc25016effcc82a9e17b336efee40ab37e3a47fc31da557027491'} - serialized: '0x364194dbcda9974ec8e57aa0d556ced515e43ce450e21aa8f9b2099a528679fcf45aed142db60b7f848bd399b63f0933ad1256c89ae823b24e1d81fae3d3d382d60012d8399f469ff404e3bbf908027a19359bb274c115255c3fe3bdbf58d0fb4cdb63a19a67082c697ef910c182dc824c8fb048c935b4b46f522c36047ae36feef84654c1e868f3a0edd76852c09e35414782160767439b49aceaa4219cc25016effcc82a9e17b336efee40ab37e3a47fc31da557027491' - root: '0x2eaae270579fc1a1eabde69c841221cb3dfab9de7ad99fcfbee8fe0c198878b7' - signing_root: '0x844655facb151b633410ffc698d8467c6488ae87f2d5f739d39c9bfc18750524' -``` -**type_name** - Name of valid Eth2.0 type from the spec -**value** - Field values used to create type instance -**serialized** - [SSZ serialization](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#serialization) of the value -**root** - [hash_tree_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#merkleization) of the value -**signing_root** - (Optional) [signing_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) of the value, if type contains ``signature`` field \ No newline at end of file +Test-format documentation can be found [here](../../specs/test_formats/ssz_static/README.md). 
From 0da60ba90d09a60ea926ccd046b3edf267fc05f3 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 22 Apr 2019 15:12:30 +1000 Subject: [PATCH 420/481] Fix activation queue bug Fix bug [flagged by @NIC619 and @hwwhww](https://github.com/ethereum/eth2.0-specs/pull/850#issuecomment-485275575) whereby the `activation_epoch` of validators dequeued since the finalized epoch was overwritten. Cosmetic changes: 1) Remove `activate_validator` (there is no overlap between genesis and non-genesis activations) 2) Improve comments related to activation queue --- specs/core/0_beacon-chain.md | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0ae4c1160..ea2225ffd 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -94,7 +94,6 @@ - [`bls_verify_multiple`](#bls_verify_multiple) - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) - [Routines for updating validator status](#routines-for-updating-validator-status) - - [`activate_validator`](#activate_validator) - [`initiate_validator_exit`](#initiate_validator_exit) - [`slash_validator`](#slash_validator) - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) @@ -1205,22 +1204,6 @@ def get_churn_limit(state: BeaconState) -> int: Note: All functions in this section mutate `state`. -#### `activate_validator` - -```python -def activate_validator(state: BeaconState, index: ValidatorIndex) -> None: - """ - Activate the validator of the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - if state.slot == GENESIS_SLOT: - validator.activation_eligibility_epoch = GENESIS_EPOCH - validator.activation_epoch = GENESIS_EPOCH - else: - validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) -``` - #### `initiate_validator_exit` ```python @@ -1340,9 +1323,10 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], process_deposit(state, deposit) # Process genesis activations - for index in range(len(state.validator_registry)): + for index, validator in enumerate(state.validator_registry): if get_effective_balance(state, index) >= MAX_DEPOSIT_AMOUNT: - activate_validator(state, index) + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): @@ -1745,14 +1729,16 @@ def process_registry_updates(state: BeaconState) -> None: if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: initiate_validator_exit(state, index) - # Process activations + # Queue validators are eligible for activation and not dequeued prior to finalized epoch activation_queue = sorted([ index for index, validator in enumerate(state.validator_registry) if validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch) ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) + # Dequeued validators for activation up to churn limit (without resetting activation epoch) for index in activation_queue[:get_churn_limit(state)]: - activate_validator(state, index) + if validator.activation_epoch != FAR_FUTURE_EPOCH: + validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) ``` #### Slashings From 
dc275f024d04265deaad09fc28c1e7289db43573 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 22 Apr 2019 15:16:34 +1000 Subject: [PATCH 421/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ea2225ffd..d04f12a6d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1729,7 +1729,7 @@ def process_registry_updates(state: BeaconState) -> None: if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: initiate_validator_exit(state, index) - # Queue validators are eligible for activation and not dequeued prior to finalized epoch + # Queue validators eligible for activation and not dequeued for activation prior to finalized epoch activation_queue = sorted([ index for index, validator in enumerate(state.validator_registry) if validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and From c123fb1b975ba2f2ba41e0637f70f5a693cdf2a3 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Mon, 22 Apr 2019 16:13:46 +1000 Subject: [PATCH 422/481] =?UTF-8?q?Single=20effective=20balance=20per=20re?= =?UTF-8?q?view=20by=20Vitalik=E2=80=94significant=20simplification?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- specs/core/0_beacon-chain.md | 106 +++++++++++++---------------------- 1 file changed, 40 insertions(+), 66 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 32f24b2e3..8739aead9 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -58,7 +58,6 @@ - [`is_active_validator`](#is_active_validator) - [`is_slashable_validator`](#is_slashable_validator) - [`get_active_validator_indices`](#get_active_validator_indices) - - [`get_current_epoch_effective_balance`](#get_current_epoch_effective_balance) - [`increase_balance`](#increase_balance) - [`decrease_balance`](#decrease_balance) - [`get_permuted_index`](#get_permuted_index) @@ -77,7 +76,6 @@ - [`get_attesting_indices`](#get_attesting_indices) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) - - [`get_effective_balance`](#get_effective_balance) - [`get_total_balance`](#get_total_balance) - [`get_domain`](#get_domain) - [`get_bitfield_bit`](#get_bitfield_bit) @@ -194,7 +192,7 @@ These configurations are updated for releases, but may be out of sync during `de | `MIN_DEPOSIT_AMOUNT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | | `MAX_DEPOSIT_AMOUNT` | `2**5 * 10**9` (= 32,000,000,000) | Gwei | | `EJECTION_BALANCE` | `2**4 * 10**9` (= 16,000,000,000) | Gwei | -| `HIGH_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | +| `EFFECTIVE_BALANCE_INCREMENT` | `2**0 * 10**9` (= 1,000,000,000) | Gwei | ### Initial values @@ -710,21 +708,6 @@ def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[Valid return [i for i, v in enumerate(state.validator_registry) if is_active_validator(v, epoch)] ``` -### `get_current_epoch_effective_balance` - -```python -def get_current_epoch_effective_balance(state: BeaconState, index: ValidatorIndex) -> Gwei: - """ - Get validator effective balance for the current epoch - """ - balance = min(state.balances[index], MAX_DEPOSIT_AMOUNT) - validator = state.validator_registry[index] - HALF_INCREMENT = HIGH_BALANCE_INCREMENT // 2 - if state.slot == GENESIS_SLOT or (validator.effective_balance > balance or validator.effective_balance + 3 * 
HALF_INCREMENT < balance): - return balance - balance % HIGH_BALANCE_INCREMENT - return validator.effective_balance -``` - ### `increase_balance` ```python @@ -944,7 +927,7 @@ def get_beacon_proposer_index(state: BeaconState, slot: Slot=None) -> ValidatorI while True: candidate = first_committee[(epoch + i) % len(first_committee)] random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32] - if get_effective_balance(state, candidate, epoch) * 256 > MAX_DEPOSIT_AMOUNT * random_byte: + if state.validator_registry[candidate].effective_balance * 256 > MAX_DEPOSIT_AMOUNT * random_byte: return candidate i += 1 ``` @@ -992,24 +975,14 @@ def bytes_to_int(data: bytes) -> int: return int.from_bytes(data, 'little') ``` -### `get_effective_balance` - -```python -def get_effective_balance(state: BeaconState, index: ValidatorIndex, epoch: Epoch) -> Gwei: - """ - Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. - """ - return get_current_epoch_effective_balance(state, index) if epoch == get_current_epoch(state) else state.validator_registry[index].effective_balance -``` - ### `get_total_balance` ```python -def get_total_balance(state: BeaconState, validators: List[ValidatorIndex], epoch: Epoch) -> Gwei: +def get_total_balance(state: BeaconState, indices: List[ValidatorIndex]) -> Gwei: """ Return the combined effective balance of an array of ``validators``. """ - return sum([get_effective_balance(state, index, epoch) for index in validators]) + return sum([state.validator_registry[index].effective_balance for index in indices]) ``` ### `get_domain` @@ -1244,9 +1217,10 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl """ current_epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) - state.validator_registry[slashed_index].slashed = True - state.validator_registry[slashed_index].withdrawable_epoch = current_epoch + LATEST_SLASHED_EXIT_LENGTH - slashed_balance = get_effective_balance(state, slashed_index, current_epoch) + slashed_validator = state.validator_registry[slashed_index] + slashed_validator.slashed = True + slashed_validator.withdrawable_epoch = current_epoch + LATEST_SLASHED_EXIT_LENGTH + slashed_balance = slashed_validator.effective_balance state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance proposer_index = get_beacon_proposer_index(state) @@ -1463,8 +1437,8 @@ The steps below happen when `state.slot > GENESIS_SLOT and (state.slot + 1) % SL We define epoch transition helper functions: ```python -def get_total_active_balance(state: BeaconState, epoch: Epoch) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state, epoch), epoch) +def get_total_active_balance(state: BeaconState) -> Gwei: + return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state))) ``` ```python @@ -1497,8 +1471,8 @@ def get_unslashed_attesting_indices(state: BeaconState, attestations: List[Pendi ``` ```python -def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation], epoch: Epoch) -> Gwei: - return get_total_balance(state, get_unslashed_attesting_indices(state, attestations), epoch) +def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: + return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) ``` ```python @@ -1526,7 +1500,7 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState, 
epoch: Epoch return [a for a in shard_attestations if get_crosslink_from_attestation_data(state, a.data) == crosslink] # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) winning_crosslink = max(candidate_crosslinks, key=lambda crosslink: ( - get_attesting_balance(state, get_attestations_for(crosslink), epoch), crosslink.crosslink_data_root + get_attesting_balance(state, get_attestations_for(crosslink)), crosslink.crosslink_data_root )) return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink)) @@ -1557,13 +1531,13 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.previous_justified_epoch = state.current_justified_epoch state.previous_justified_root = state.current_justified_root state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 - previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, previous_epoch), previous_epoch) - if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state, previous_epoch) * 2: + previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, previous_epoch)) + if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: state.current_justified_epoch = get_previous_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 1) - current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, current_epoch), current_epoch) - if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state, current_epoch) * 2: + current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, current_epoch)) + if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: state.current_justified_epoch = get_current_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 0) @@ -1601,7 +1575,7 @@ def process_crosslinks(state: BeaconState) -> None: epoch = slot_to_epoch(slot) for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) - if 3 * get_total_balance(state, attesting_indices, epoch) >= 2 * get_total_balance(state, crosslink_committee, epoch): + if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee): state.current_crosslinks[shard] = winning_crosslink ``` @@ -1610,18 +1584,18 @@ def process_crosslinks(state: BeaconState) -> None: First, we define additional helpers: ```python -def get_base_reward(state: BeaconState, index: ValidatorIndex, epoch: Epoch) -> Gwei: - adjusted_quotient = integer_squareroot(get_total_active_balance(state, epoch)) // BASE_REWARD_QUOTIENT +def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: + adjusted_quotient = integer_squareroot(get_total_active_balance(state)) // BASE_REWARD_QUOTIENT if adjusted_quotient == 0: return 0 - return get_effective_balance(state, index, epoch) // adjusted_quotient // BASE_REWARDS_PER_EPOCH + return state.validator_registry[index].effective_balance // adjusted_quotient // BASE_REWARDS_PER_EPOCH ``` ```python def 
get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: previous_epoch = get_previous_epoch(state) - total_balance = get_total_active_balance(state, previous_epoch) + total_balance = get_total_active_balance(state) eligible_validator_indices = [ index for index, validator in enumerate(state.validator_registry) if is_active_validator(validator, previous_epoch) or (validator.slashed and previous_epoch < validator.withdrawable_epoch) @@ -1629,7 +1603,7 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] for index in eligible_validator_indices: - base_reward = get_base_reward(state, index, previous_epoch) + base_reward = get_base_reward(state, index) # Micro-incentives for attestations matching FFG source, FFG target, and head for attestations in ( @@ -1638,7 +1612,7 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: get_matching_source_attestations(state, previous_epoch), ): if index in get_unslashed_attesting_indices(state, attestations): - rewards[index] += base_reward * get_attesting_balance(state, attestations, previous_epoch) // total_balance + rewards[index] += base_reward * get_attesting_balance(state, attestations) // total_balance else: penalties[index] += base_reward @@ -1656,7 +1630,7 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY: penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward if index not in get_unslashed_attesting_indices(state, get_matching_target_attestations(state, previous_epoch)): - penalties[index] += get_effective_balance(state, index, previous_epoch) * finality_delay // INACTIVITY_PENALTY_QUOTIENT + penalties[index] += state.validator_registry[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT return [rewards, penalties] ``` @@ -1669,10 +1643,10 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: epoch = slot_to_epoch(slot) for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) - attesting_balance = get_total_balance(state, attesting_indices, epoch) - committee_balance = get_total_balance(state, crosslink_committee, epoch) + attesting_balance = get_total_balance(state, attesting_indices) + committee_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: - base_reward = get_base_reward(state, index, epoch) + base_reward = get_base_reward(state, index) if index in attesting_indices: rewards[index] += base_reward * attesting_balance // committee_balance else: @@ -1726,7 +1700,7 @@ Run the following function: def process_slashings(state: BeaconState) -> None: current_epoch = get_current_epoch(state) active_validator_indices = get_active_validator_indices(state, current_epoch) - total_balance = get_total_balance(state, active_validator_indices, current_epoch) + total_balance = get_total_balance(state, active_validator_indices) # Compute `total_penalties` total_at_start = state.latest_slashed_balances[(current_epoch + 1) % LATEST_SLASHED_EXIT_LENGTH] @@ -1736,8 +1710,8 @@ def process_slashings(state: BeaconState) -> None: for index, validator in enumerate(state.validator_registry): if validator.slashed and current_epoch == validator.withdrawable_epoch - 
LATEST_SLASHED_EXIT_LENGTH // 2: penalty = max( - get_effective_balance(state, index, current_epoch) * min(total_penalties * 3, total_balance) // total_balance, - get_effective_balance(state, index, current_epoch) // MIN_PENALTY_QUOTIENT + validator.effective_balance * min(total_penalties * 3, total_balance) // total_balance, + validator.effective_balance // MIN_PENALTY_QUOTIENT ) decrease_balance(state, index, penalty) ``` @@ -1753,9 +1727,12 @@ def process_final_updates(state: BeaconState) -> None: # Reset eth1 data votes if state.slot % SLOTS_PER_ETH1_VOTING_PERIOD == 0: state.eth1_data_votes = [] - # Update effective balances + # Update effective balances with hysteresis for index, validator in enumerate(state.validator_registry): - validator.effective_balance = get_current_epoch_effective_balance(state, index) + balance = min(state.balances[index], MAX_DEPOSIT_AMOUNT) + HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2 + if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance: + validator.effective_balance = balance - balance % EFFECTIVE_BALANCE_INCREMENT # Update start shard state.latest_start_shard = (state.latest_start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT # Set active index root @@ -1980,20 +1957,17 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)): return - # Add new validator - validator = Validator( + # Add validator and balance entries + state.validator_registry.append(Validator( pubkey=pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - ) - state.validator_registry.append(validator) - - # Add initial balance + effective_balance=amount - amount % EFFECTIVE_BALANCE_INCREMENT + )) state.balances.append(amount) - validator.effective_balance = get_current_epoch_effective_balance(state, len(state.validator_registry) - 1) else: # Increase balance by deposit amount index = validator_pubkeys.index(pubkey) From 6903f2eec738a3f486e9704e96ed62708271c0e1 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 22 Apr 2019 16:17:14 +1000 Subject: [PATCH 423/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 8739aead9..0adee0f95 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1965,7 +1965,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - effective_balance=amount - amount % EFFECTIVE_BALANCE_INCREMENT + effective_balance=amount - amount % EFFECTIVE_BALANCE_INCREMENT, )) state.balances.append(amount) else: From 81ee59bca85350fd479a7833045b857284c56834 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 22 Apr 2019 16:34:50 +1000 Subject: [PATCH 424/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0adee0f95..931511b36 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -238,7 +238,7 @@ These configurations are updated for releases, but may be 
out of sync during `de | `WHISTLEBLOWING_REWARD_QUOTIENT` | `2**9` (= 512) | | `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) | | `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) | -| `MIN_PENALTY_QUOTIENT` | `2**5` (= 32) | +| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) | * The `BASE_REWARD_QUOTIENT` parameter dictates the per-epoch reward. It corresponds to ~2.54% annual interest assuming 10 million participating ETH in every epoch. * The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (~18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating [validators](#dfn-validator) to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline [validators](#dfn-validator) after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)` so after `INVERSE_SQRT_E_DROP_TIME` epochs it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. @@ -401,7 +401,7 @@ The types are defined topologically to aid in facilitating an executable version 'withdrawable_epoch': 'uint64', # Was the validator slashed 'slashed': 'bool', - # Rounded balance + # Effective balance 'effective_balance': 'uint64', } ``` @@ -725,7 +725,7 @@ def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> """ Decrease validator balance by ``delta`` with underflow protection. """ - state.balances[index] = state.balances[index] - delta if state.balances[index] >= delta else 0 + state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta ``` ### `get_permuted_index` @@ -925,10 +925,12 @@ def get_beacon_proposer_index(state: BeaconState, slot: Slot=None) -> ValidatorI first_committee, _ = get_crosslink_committees_at_slot(state, slot if slot != None else state.slot)[0] i = 0 while True: - candidate = first_committee[(epoch + i) % len(first_committee)] + candidate_index = first_committee[(epoch + i) % len(first_committee)] random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32] - if state.validator_registry[candidate].effective_balance * 256 > MAX_DEPOSIT_AMOUNT * random_byte: - return candidate + MAX_RANDOM_BYTE = 2**8 - 1 + effective_balance = state.validator_registry[candidate_index].effective_balance + if effective_balance * MAX_RANDOM_BYTE >= MAX_DEPOSIT_AMOUNT * random_byte: + return candidate_index i += 1 ``` @@ -1217,10 +1219,9 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl """ current_epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) - slashed_validator = state.validator_registry[slashed_index] - slashed_validator.slashed = True - slashed_validator.withdrawable_epoch = current_epoch + LATEST_SLASHED_EXIT_LENGTH - slashed_balance = slashed_validator.effective_balance + state.validator_registry[slashed_index].slashed = True + state.validator_registry[slashed_index].withdrawable_epoch = current_epoch + LATEST_SLASHED_EXIT_LENGTH + slashed_balance = state.validator_registry[slashed_index].effective_balance state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance proposer_index = get_beacon_proposer_index(state) @@ -1371,9 +1372,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) attestation_targets = [(i, get_latest_attestation_target(store, i)) for i in 
active_validator_indices] - # Use the rounded-balance-with-hysteresis supplied by the protocol for fork - # choice voting. This reduces the number of recomputations that need to be - # made for optimized implementations that precompute and save data + # Use the effective balance for fork choice voting to reduce recomputations and save bandwidth def get_vote_count(block: BeaconBlock) -> int: return sum( start_state.validator_registry[validator_index].effective_balance @@ -1711,7 +1710,7 @@ def process_slashings(state: BeaconState) -> None: if validator.slashed and current_epoch == validator.withdrawable_epoch - LATEST_SLASHED_EXIT_LENGTH // 2: penalty = max( validator.effective_balance * min(total_penalties * 3, total_balance) // total_balance, - validator.effective_balance // MIN_PENALTY_QUOTIENT + validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT ) decrease_balance(state, index, penalty) ``` @@ -1965,7 +1964,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, - effective_balance=amount - amount % EFFECTIVE_BALANCE_INCREMENT, + effective_balance=amount - amount % EFFECTIVE_BALANCE_INCREMENT )) state.balances.append(amount) else: From 92e4bba7df4f4cc67911253e1822bbac90562c96 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Mon, 22 Apr 2019 16:38:44 +1000 Subject: [PATCH 425/481] small constants update to reflect new genesis slot, and rename block sig domain (#978) --- configs/constant_presets/mainnet.yaml | 6 +++--- configs/constant_presets/minimal.yaml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml index d06febb77..8b9dade73 100644 --- a/configs/constant_presets/mainnet.yaml +++ b/configs/constant_presets/mainnet.yaml @@ -42,8 +42,8 @@ HIGH_BALANCE_INCREMENT: 1000000000 # Initial values # --------------------------------------------------------------- GENESIS_FORK_VERSION: 0x00000000 -# 2**32, GENESIS_EPOCH is derived from this constant -GENESIS_SLOT: 4294967296 +# 0, GENESIS_EPOCH is derived from this constant +GENESIS_SLOT: 0 GENESIS_START_SHARD: 0 # 2**64 - 1 FAR_FUTURE_EPOCH: 18446744073709551615 @@ -116,7 +116,7 @@ MAX_TRANSFERS: 16 # Signature domains # --------------------------------------------------------------- -DOMAIN_BEACON_BLOCK: 0 +DOMAIN_BEACON_PROPOSER: 0 DOMAIN_RANDAO: 1 DOMAIN_ATTESTATION: 2 DOMAIN_DEPOSIT: 3 diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index 80af5398c..edc447c45 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -42,8 +42,8 @@ HIGH_BALANCE_INCREMENT: 1000000000 # Initial values # --------------------------------------------------------------- GENESIS_FORK_VERSION: 0x00000000 -# 2**32, GENESIS_EPOCH is derived from this constant -GENESIS_SLOT: 4294967296 +# 0, GENESIS_EPOCH is derived from this constant +GENESIS_SLOT: 0 GENESIS_START_SHARD: 0 # 2**64 - 1 FAR_FUTURE_EPOCH: 18446744073709551615 @@ -116,7 +116,7 @@ MAX_TRANSFERS: 16 # Signature domains # --------------------------------------------------------------- -DOMAIN_BEACON_BLOCK: 0 +DOMAIN_BEACON_PROPOSER: 0 DOMAIN_RANDAO: 1 DOMAIN_ATTESTATION: 2 DOMAIN_DEPOSIT: 3 From 7043bb90800b2be43781e36b6f87472bf0e38eba Mon Sep 17 00:00:00 2001 From: Dmitrii Shmatko Date: Mon, 22 Apr 2019 12:09:56 +0300 Subject: [PATCH 426/481] test: clean up of ssz_static references styling --- 
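The hysteresis update to `effective_balance` in `process_final_updates` above can be exercised in isolation. The sketch below mirrors that logic on plain integers (using the `EFFECTIVE_BALANCE_INCREMENT` and `MAX_DEPOSIT_AMOUNT` values from this spec) rather than on a `BeaconState`; it is an illustration, not part of the state transition.

```python
# Standalone sketch of the hysteresis rule used in `process_final_updates`
# above; it mirrors the spec logic on plain integers rather than a BeaconState.
EFFECTIVE_BALANCE_INCREMENT = 10**9        # 1 ETH in Gwei
MAX_DEPOSIT_AMOUNT = 32 * 10**9            # Gwei
HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2

def updated_effective_balance(effective_balance: int, actual_balance: int) -> int:
    balance = min(actual_balance, MAX_DEPOSIT_AMOUNT)
    # Only move the effective balance once the actual balance drops below it,
    # or drifts more than 1.5 increments above it
    if balance < effective_balance or effective_balance + 3 * HALF_INCREMENT < balance:
        return balance - balance % EFFECTIVE_BALANCE_INCREMENT
    return effective_balance

# A 30 ETH effective balance is unaffected by small upward drift...
assert updated_effective_balance(30 * 10**9, 31_400_000_000) == 30 * 10**9
# ...is bumped to 31 ETH once the actual balance exceeds 31.5 ETH...
assert updated_effective_balance(30 * 10**9, 31_600_000_000) == 31 * 10**9
# ...and is rounded down to 29 ETH as soon as the actual balance dips below 30 ETH.
assert updated_effective_balance(30 * 10**9, 29_900_000_000) == 29 * 10**9
```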
specs/test_formats/ssz_static/core.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/test_formats/ssz_static/core.md b/specs/test_formats/ssz_static/core.md index ee712a830..059f11027 100644 --- a/specs/test_formats/ssz_static/core.md +++ b/specs/test_formats/ssz_static/core.md @@ -28,5 +28,5 @@ A test-runner can implement the following assertions: **`serialized`**: [SSZ serialization](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#serialization) -**`root`** - [hash_tree_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#merkleization) -**`signing_root`** - [signing_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) +**`root`** - [hash_tree_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#merkleization) function +**`signing_root`** - [signing_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) function From 1c5cc1299a7c94e8f9dc123ef103c4d84c8971ff Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 22 Apr 2019 20:49:07 +1000 Subject: [PATCH 427/481] Update specs/core/0_beacon-chain.md Co-Authored-By: JustinDrake --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index d04f12a6d..b2796a588 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1737,7 +1737,7 @@ def process_registry_updates(state: BeaconState) -> None: ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) # Dequeued validators for activation up to churn limit (without resetting activation epoch) for index in activation_queue[:get_churn_limit(state)]: - if validator.activation_epoch != FAR_FUTURE_EPOCH: + if validator.activation_epoch == FAR_FUTURE_EPOCH: validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) ``` From 94049490153fba2dd976f19a8ab817694a540688 Mon Sep 17 00:00:00 2001 From: Justin Date: Mon, 22 Apr 2019 23:18:17 +1000 Subject: [PATCH 428/481] Split off fork choice in a separate document --- specs/core/0_beacon-chain.md | 79 ------------------------------------ 1 file changed, 79 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 0ae4c1160..614699871 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -104,8 +104,6 @@ - [`Eth2Genesis` log](#eth2genesis-log) - [Vyper code](#vyper-code) - [On genesis](#on-genesis) - - [Beacon chain processing](#beacon-chain-processing) - - [Beacon chain fork choice rule](#beacon-chain-fork-choice-rule) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [State caching](#state-caching) - [Per-epoch processing](#per-epoch-processing) @@ -210,7 +208,6 @@ These configurations are updated for releases, but may be out of sync during `de | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `SECONDS_PER_SLOT` | `6` | seconds | 6 seconds | | `MIN_ATTESTATION_INCLUSION_DELAY` | `2**2` (= 4) | slots | 24 seconds | | `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes | | `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes | @@ -1351,82 +1348,6 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], return state ``` -## Beacon chain processing - -The beacon chain is the system chain for Ethereum 2.0. 
The main responsibilities of the beacon chain are as follows: - -* Store and maintain the registry of [validators](#dfn-validator) -* Process crosslinks (see above) -* Process its per-block consensus, as well as the finality gadget - -Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of what is the current "canonical chain", terminating at the current "head". However, because of the beacon chain's relationship with Ethereum 1.0, and because it is a proof-of-stake chain, there are differences. - -For a beacon chain block, `block`, to be processed by a node, the following conditions must be met: - -* The parent block with root `block.previous_block_root` has been processed and accepted. -* An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted. -* The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`. (Note that leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year.) - -If these conditions are not met, the client should delay processing the beacon block until the conditions are all satisfied. - -Beacon block production is significantly different because of the proof-of-stake mechanism. A client simply checks what it thinks is the canonical chain when it should create a block and looks up what its slot number is; when the slot arrives, it either proposes or attests to a block as required. Note that this requires each node to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. - -### Beacon chain fork choice rule - -The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greediest Heaviest Observed SubTree (GHOST). At any point in time a [validator](#dfn-validator) `v` subjectively calculates the beacon chain head as follows. - -* Abstractly define `Store` as the type of storage object for the chain data and `store` be the set of attestations and blocks that the [validator](#dfn-validator) `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`. -* Let `finalized_head` be the finalized block with the highest epoch. (A block `B` is finalized if there is a descendant of `B` in `store` the processing of which sets `B` as finalized.) -* Let `justified_head` be the descendant of `finalized_head` with the highest epoch that has been justified for at least 1 epoch. (A block `B` is justified if there is a descendant of `B` in `store` the processing of which sets `B` as justified.) If no such descendant exists set `justified_head` to `finalized_head`. -* Let `get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock` be the ancestor of `block` with slot number `slot`. The `get_ancestor` function can be defined recursively as: - -```python -def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: - """ - Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found. 
- """ - if block.slot == slot: - return block - elif block.slot < slot: - return None - else: - return get_ancestor(store, store.get_parent(block), slot) -``` - -* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first. -* Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`. -* Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` returns the child blocks of the given `block`. -* Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`. -* The `head` is `lmd_ghost(store, justified_head_state, justified_head)` where the function `lmd_ghost` is defined below. Note that the implementation below is suboptimal; there are implementations that compute the head in time logarithmic in slot count. - -```python -def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock: - """ - Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``. - """ - validators = start_state.validator_registry - active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) - attestation_targets = [(i, get_latest_attestation_target(store, i)) for i in active_validator_indices] - - # Use the rounded-balance-with-hysteresis supplied by the protocol for fork - # choice voting. This reduces the number of recomputations that need to be - # made for optimized implementations that precompute and save data - def get_vote_count(block: BeaconBlock) -> int: - return sum( - start_state.validator_registry[validator_index].high_balance - for validator_index, target in attestation_targets - if get_ancestor(store, target, block.slot) == block - ) - - head = start_block - while 1: - children = get_children(store, head) - if len(children) == 0: - return head - # Ties broken by favoring block with lexicographically higher root - head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) -``` - ## Beacon chain state transition function We now define the state transition function. At a high level, the state transition is made up of four parts: From a103e79e676ca08cac0040f60c90fecf7e2ea3f2 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Mon, 22 Apr 2019 23:20:48 +1000 Subject: [PATCH 429/481] Add 0_fork-choice.md --- specs/core/0_fork-choice.md | 96 +++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 specs/core/0_fork-choice.md diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md new file mode 100644 index 000000000..53eccf7ea --- /dev/null +++ b/specs/core/0_fork-choice.md @@ -0,0 +1,96 @@ +# Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice + +**NOTICE**: This document is a work in progress for researchers and implementers. 
+ +## Table of contents + + +- [Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice](#ethereum-20-phase-0----beacon-chain-fork-choice) + - [Table of contents](#table-of-contents) + - [Introduction](#introduction) + - [Constants](#constants) + - [Time parameters](#time-parameters) + - [Beacon chain processing](#beacon-chain-processing) + - [Beacon chain fork choice rule](#beacon-chain-fork-choice-rule) + + + +## Introduction + +This document represents is the specification for the beacon chain fork choice rule, part of phase 0 of Ethereum 2.0. + +## Constants + +### Time parameters + +| Name | Value | Unit | Duration | +| - | - | :-: | :-: | +| `SECONDS_PER_SLOT` | `6` | seconds | 6 seconds | + +## Beacon chain processing + +Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of what is the current "canonical chain", terminating at the current "head". For a beacon block, `block`, to be processed by a node, the following conditions must be met: + +* The parent block with root `block.previous_block_root` has been processed and accepted. +* An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted. +* The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`. + +Note: Leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year. + +Note: Nodes needs to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes. + +### Beacon chain fork choice rule + +The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greediest Heaviest Observed SubTree (GHOST). At any point in time a [validator](#dfn-validator) `v` subjectively calculates the beacon chain head as follows. + +* Abstractly define `Store` as the type of storage object for the chain data and `store` be the set of attestations and blocks that the [validator](#dfn-validator) `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`. +* Let `finalized_head` be the finalized block with the highest epoch. (A block `B` is finalized if there is a descendant of `B` in `store` the processing of which sets `B` as finalized.) +* Let `justified_head` be the descendant of `finalized_head` with the highest epoch that has been justified for at least 1 epoch. (A block `B` is justified if there is a descendant of `B` in `store` the processing of which sets `B` as justified.) If no such descendant exists set `justified_head` to `finalized_head`. +* Let `get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock` be the ancestor of `block` with slot number `slot`. The `get_ancestor` function can be defined recursively as: + +```python +def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: + """ + Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found. + """ + if block.slot == slot: + return block + elif block.slot < slot: + return None + else: + return get_ancestor(store, store.get_parent(block), slot) +``` + +* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. 
If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first. +* Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`. +* Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` returns the child blocks of the given `block`. +* Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`. +* The `head` is `lmd_ghost(store, justified_head_state, justified_head)` where the function `lmd_ghost` is defined below. Note that the implementation below is suboptimal; there are implementations that compute the head in time logarithmic in slot count. + +```python +def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock: + """ + Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``. + """ + validators = start_state.validator_registry + active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) + attestation_targets = [(i, get_latest_attestation_target(store, i)) for i in active_validator_indices] + + # Use the rounded-balance-with-hysteresis supplied by the protocol for fork + # choice voting. This reduces the number of recomputations that need to be + # made for optimized implementations that precompute and save data + def get_vote_count(block: BeaconBlock) -> int: + return sum( + start_state.validator_registry[validator_index].high_balance + for validator_index, target in attestation_targets + if get_ancestor(store, target, block.slot) == block + ) + + head = start_block + while 1: + children = get_children(store, head) + if len(children) == 0: + return head + # Ties broken by favoring block with lexicographically higher root + head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) +``` From 0bdd8e778bd8de5a035ab11bdc2066a4dd6cb1b9 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Mon, 22 Apr 2019 23:29:19 +1000 Subject: [PATCH 430/481] Add 0_deposit-contract.md --- specs/core/0_beacon-chain.md | 51 +-------------------- specs/core/0_deposit-contract.md | 76 ++++++++++++++++++++++++++++++++ specs/core/0_fork-choice.md | 2 +- 3 files changed, 78 insertions(+), 51 deletions(-) create mode 100644 specs/core/0_deposit-contract.md diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 614699871..b6a89f679 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -97,12 +97,6 @@ - [`activate_validator`](#activate_validator) - [`initiate_validator_exit`](#initiate_validator_exit) - [`slash_validator`](#slash_validator) - - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) - - [Deposit arguments](#deposit-arguments) - - [Withdrawal credentials](#withdrawal-credentials) - - [`Deposit` logs](#deposit-logs) - - [`Eth2Genesis` log](#eth2genesis-log) - - [Vyper code](#vyper-code) - [On genesis](#on-genesis) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [State caching](#state-caching) @@ -182,7 +176,6 @@ These configurations are updated for releases, but may be out of sync during `de | Name | Value | | - | - | -| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** | | `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) | ### Gwei values @@ -1267,49 +1260,6 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl decrease_balance(state, slashed_index, whistleblowing_reward) ``` 
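For a sense of scale, the whistleblowing reward debited from the slashed validator at the end of `slash_validator` above is split between the block proposer and the whistleblower. The sketch below assumes the split used at this point in the spec — `whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT`, with the proposer receiving `whistleblowing_reward // PROPOSER_REWARD_QUOTIENT` — and works through a 32 ETH effective balance using the quotients defined earlier in this series.

```python
# Worked example of the whistleblowing reward split, assuming the formula
# described in the lead-in; plain arithmetic only, no BeaconState involved.
WHISTLEBLOWING_REWARD_QUOTIENT = 2**9   # 512
PROPOSER_REWARD_QUOTIENT = 2**3         # 8

def whistleblowing_split(slashed_effective_balance: int):
    whistleblowing_reward = slashed_effective_balance // WHISTLEBLOWING_REWARD_QUOTIENT
    proposer_reward = whistleblowing_reward // PROPOSER_REWARD_QUOTIENT
    whistleblower_reward = whistleblowing_reward - proposer_reward
    # The slashed validator is debited the full whistleblowing_reward
    return whistleblowing_reward, proposer_reward, whistleblower_reward

# For a 32 ETH (32,000,000,000 Gwei) effective balance:
# total 62,500,000 Gwei, of which 7,812,500 Gwei goes to the proposer
# and 54,687,500 Gwei to the whistleblower.
assert whistleblowing_split(32 * 10**9) == (62_500_000, 7_812_500, 54_687_500)
```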
-## Ethereum 1.0 deposit contract - -The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in phase 2, i.e. when the EVM2.0 is deployed and the shards have state. - -### Deposit arguments - -The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`. - -### Withdrawal credentials - -One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now the only expected format is as follows: - -* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE` -* `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey - -The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage. - -### `Deposit` logs - -Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract. - -### `Eth2Genesis` log - -When a sufficient amount of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where: - -* `genesis_time` equals `time` in the `Eth2Genesis` log -* `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log -* `latest_eth1_data.deposit_count` equals `deposit_count` in the `Eth2Genesis` log -* `latest_eth1_data.block_hash` equals the hash of the block that included the log -* `genesis_validator_deposits` is a list of `Deposit` objects built according to the `Deposit` logs up to the deposit that triggered the `Eth2Genesis` log, processed in the order in which they were emitted (oldest to newest) - -### Vyper code - -The source for the Vyper contract lives in a [separate repository](https://github.com/ethereum/deposit_contract) at [https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py](https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py). - -Note: to save ~10x on gas this contract uses a somewhat unintuitive progressive Merkle root calculation algo that requires only O(log(n)) storage. See https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py for an implementation of the same algo in python tested for correctness. - -For convenience, we provide the interface to the contract here: - -* `__init__()`: initializes the contract -* `get_deposit_root() -> bytes32`: returns the current root of the deposit tree -* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei. 
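The progressive Merkle root technique referenced above stores only one node per tree level, so a depth-32 deposit tree needs 32 stored hashes regardless of how many deposits have been made. Below is a minimal Python sketch of the idea; `sha256` and a small depth are illustrative assumptions, and this is not the deployed Vyper code (see the linked research repository for the original).

```python
# Minimal sketch of the O(log n)-storage progressive Merkle root algorithm.
# sha256 and depth=4 are illustrative; the deposit contract uses depth 32.
from hashlib import sha256

def h(a: bytes, b: bytes) -> bytes:
    return sha256(a + b).digest()

class ProgressiveMerkleTree:
    def __init__(self, depth: int = 4):
        self.depth = depth
        self.count = 0
        # Roots of all-zero subtrees, one per height
        self.zero_hashes = [b'\x00' * 32]
        for _ in range(depth - 1):
            self.zero_hashes.append(h(self.zero_hashes[-1], self.zero_hashes[-1]))
        # Only one stored node per height -> O(log n) storage
        self.branch = list(self.zero_hashes)

    def insert(self, leaf: bytes) -> None:
        self.count += 1
        size, node = self.count, leaf
        for height in range(self.depth):
            if size % 2 == 1:
                self.branch[height] = node
                return
            node = h(self.branch[height], node)
            size //= 2

    def root(self) -> bytes:
        node, size = b'\x00' * 32, self.count
        for height in range(self.depth):
            if size % 2 == 1:
                node = h(self.branch[height], node)
            else:
                node = h(node, self.zero_hashes[height])
            size //= 2
        return node

tree = ProgressiveMerkleTree()
for i in range(5):
    tree.insert(i.to_bytes(32, 'little'))
print(tree.root().hex())  # root over the 5 leaves, zero-padded to 2**4 leaves
```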
- ## On genesis When enough full deposits have been made to the deposit contract, an `Eth2Genesis` log is emitted. Construct a corresponding `genesis_state` and `genesis_block` as follows: @@ -1348,6 +1298,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], return state ``` + ## Beacon chain state transition function We now define the state transition function. At a high level, the state transition is made up of four parts: diff --git a/specs/core/0_deposit-contract.md b/specs/core/0_deposit-contract.md new file mode 100644 index 000000000..6652ae2c2 --- /dev/null +++ b/specs/core/0_deposit-contract.md @@ -0,0 +1,76 @@ +# Ethereum 2.0 Phase 0 -- Deposit Contract + +**NOTICE**: This document is a work in progress for researchers and implementers. + +## Table of contents + + +- [Ethereum 2.0 Phase 0 -- Deposit Contract](#ethereum-20-phase-0----deposit-contract) + - [Table of contents](#table-of-contents) + - [Introduction](#introduction) + - [Constants](#constants) + - [Deposit contract](#time-parameters) + - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) + - [Deposit arguments](#deposit-arguments) + - [Withdrawal credentials](#withdrawal-credentials) + - [`Deposit` logs](#deposit-logs) + - [`Eth2Genesis` log](#eth2genesis-log) + - [Vyper code](#vyper-code) + + + +## Introduction + +This document represents is the specification for the beacon chain deposit contract, part of Ethereum 2.0 phase 0. + +## Constants + +### Deposit contract + +| Name | Value | +| - | - | +| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** | +| `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) | + +## Ethereum 1.0 deposit contract + +The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in phase 2, i.e. when the EVM2.0 is deployed and the shards have state. + +### Deposit arguments + +The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`. + +### Withdrawal credentials + +One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now the only expected format is as follows: + +* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE` +* `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey + +The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage. + +### `Deposit` logs + +Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract. + +### `Eth2Genesis` log + +When a sufficient amount of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. 
The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where: + +* `genesis_time` equals `time` in the `Eth2Genesis` log +* `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log +* `latest_eth1_data.deposit_count` equals `deposit_count` in the `Eth2Genesis` log +* `latest_eth1_data.block_hash` equals the hash of the block that included the log +* `genesis_validator_deposits` is a list of `Deposit` objects built according to the `Deposit` logs up to the deposit that triggered the `Eth2Genesis` log, processed in the order in which they were emitted (oldest to newest) + +### Vyper code + +The source for the Vyper contract lives in a [separate repository](https://github.com/ethereum/deposit_contract) at [https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py](https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py). + +Note: to save ~10x on gas this contract uses a somewhat unintuitive progressive Merkle root calculation algo that requires only O(log(n)) storage. See https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py for an implementation of the same algo in python tested for correctness. + +For convenience, we provide the interface to the contract here: + +* `__init__()`: initializes the contract +* `get_deposit_root() -> bytes32`: returns the current root of the deposit tree +* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei. diff --git a/specs/core/0_fork-choice.md b/specs/core/0_fork-choice.md index 53eccf7ea..41377ebd4 100644 --- a/specs/core/0_fork-choice.md +++ b/specs/core/0_fork-choice.md @@ -17,7 +17,7 @@ ## Introduction -This document represents is the specification for the beacon chain fork choice rule, part of phase 0 of Ethereum 2.0. +This document represents is the specification for the beacon chain fork choice rule, part of Ethereum 2.0 phase 0. 
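To make the head-finding walk in this fork choice document concrete, the toy run below descends a made-up four-block tree from the justified head, stepping at each level into the child whose subtree carries the most effective-balance-weighted latest votes. The blocks, ancestor sets, balances, and votes are all invented for illustration.

```python
# Toy illustration of the LMD-GHOST walk: starting from the justified head 'J',
# repeatedly step into the child whose subtree carries the most
# effective-balance weight from validators' latest attestation targets.
children = {'J': ['A', 'B'], 'A': ['C'], 'B': [], 'C': []}
ancestors = {'J': {'J'}, 'A': {'A', 'J'}, 'B': {'B', 'J'}, 'C': {'C', 'A', 'J'}}
latest_target = {0: 'C', 1: 'C', 2: 'B'}          # validator index -> latest attestation target
effective_balance = {0: 32, 1: 16, 2: 32}         # in ETH for readability

def vote_weight(block: str) -> int:
    # A vote for `target` counts for every block on the chain leading to `target`
    return sum(effective_balance[v] for v, target in latest_target.items() if block in ancestors[target])

head = 'J'
while children[head]:
    # The spec breaks ties by lexicographically higher block root; the block name stands in here
    head = max(children[head], key=lambda child: (vote_weight(child), child))
print(head)  # -> 'C': subtree A carries 48 ETH of votes versus 32 ETH for B
```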
## Constants From edb24ce9dda0c54c5cc86520f7ef99091ce7af10 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 22 Apr 2019 09:00:01 -0600 Subject: [PATCH 431/481] test rule 3 --- specs/core/0_beacon-chain.md | 4 + test_libs/pyspec/tests/test_finality.py | 158 +++++++++++++++++++----- 2 files changed, 132 insertions(+), 30 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4cdfa9c4f..1df72179d 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1682,12 +1682,16 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.previous_justified_root = state.current_justified_root state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) + print(previous_boundary_attesting_balance) if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: + print("prev success") state.current_justified_epoch = get_previous_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 1) current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) + print(current_boundary_attesting_balance) if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: + print("cur success") state.current_justified_epoch = get_current_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 0) diff --git a/test_libs/pyspec/tests/test_finality.py b/test_libs/pyspec/tests/test_finality.py index 8a429cb6e..ae8ba13ff 100644 --- a/test_libs/pyspec/tests/test_finality.py +++ b/test_libs/pyspec/tests/test_finality.py @@ -47,11 +47,50 @@ def check_finality(state, assert state.finalized_root == prev_state.finalized_root +def next_epoch_with_attestations(state, + fill_cur_epoch, + fill_prev_epoch): + post_state = deepcopy(state) + blocks = [] + for slot in range(spec.SLOTS_PER_EPOCH): + print("slot: %s", post_state.slot) + block = build_empty_block_for_next_slot(post_state) + if fill_prev_epoch: + print("prev") + slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 + prev_attestation = get_valid_attestation(post_state, slot_to_attest) + block.body.attestations.append(prev_attestation) + + if fill_cur_epoch: + print("cur") + slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 + if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)): + cur_attestation = get_valid_attestation(post_state, slot_to_attest) + fill_aggregate_attestation(post_state, cur_attestation) + block.body.attestations.append(cur_attestation) + + state_transition(post_state, block) + blocks.append(block) + + # if fill_prev_epoch: + # assert len(post_state.previous_epoch_attestations) >= 0 + # else: + # assert len(post_state.previous_epoch_attestations) == 0 + + # if fill_cur_epoch: + # assert len(post_state.current_epoch_attestations) >= 0 + # else: + # assert len(post_state.current_epoch_attestations) == 0 + + return state, blocks, post_state + + def test_finality_from_genesis_rule_4(state): test_state = deepcopy(state) blocks = [] for epoch in range(6): + prev_state = deepcopy(test_state) old_current_justified_epoch = test_state.current_justified_epoch old_current_justified_root = test_state.current_justified_root for 
slot in range(spec.SLOTS_PER_EPOCH): @@ -67,14 +106,14 @@ def test_finality_from_genesis_rule_4(state): blocks.append(block) if epoch == 0: - check_finality(test_state, state, False, False, False) + check_finality(test_state, prev_state, False, False, False) elif epoch == 1: - check_finality(test_state, state, False, False, False) + check_finality(test_state, prev_state, False, False, False) elif epoch == 2: - check_finality(test_state, state, True, False, False) + check_finality(test_state, prev_state, True, False, False) elif epoch >= 3: # rule 4 of finaliy - check_finality(test_state, state, True, True, True) + check_finality(test_state, prev_state, True, True, True) assert test_state.finalized_epoch == old_current_justified_epoch assert test_state.finalized_root == old_current_justified_root @@ -90,6 +129,7 @@ def test_finality_rule_1(state): blocks = [] for epoch in range(3): + prev_state = deepcopy(test_state) old_previous_justified_epoch = test_state.previous_justified_epoch old_previous_justified_root = test_state.previous_justified_root for slot in range(spec.SLOTS_PER_EPOCH): @@ -106,15 +146,17 @@ def test_finality_rule_1(state): blocks.append(block) if epoch == 0: - check_finality(test_state, state, True, False, False) + check_finality(test_state, prev_state, True, False, False) elif epoch == 1: - check_finality(test_state, state, True, True, False) + check_finality(test_state, prev_state, True, True, False) elif epoch == 2: # finalized by rule 1 - check_finality(test_state, state, True, True, True) + check_finality(test_state, prev_state, True, True, True) assert test_state.finalized_epoch == old_previous_justified_epoch assert test_state.finalized_root == old_previous_justified_root + return state, blocks, test_state + def test_finality_rule_2(state): # get past first two epochs that finality does not run on @@ -127,30 +169,86 @@ def test_finality_rule_2(state): for epoch in range(3): old_previous_justified_epoch = test_state.previous_justified_epoch old_previous_justified_root = test_state.previous_justified_root - for slot in range(spec.SLOTS_PER_EPOCH): - attestation = None - if epoch == 0: - slot_to_attest = test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - if slot_to_attest >= get_epoch_start_slot(get_current_epoch(state)): - attestation = get_valid_attestation(test_state, slot_to_attest) - fill_aggregate_attestation(test_state, attestation) - if epoch == 2: - slot_to_attest = test_state.slot - spec.SLOTS_PER_EPOCH + 1 - attestation = get_valid_attestation(test_state, slot_to_attest) - fill_aggregate_attestation(test_state, attestation) - - block = build_empty_block_for_next_slot(test_state) - if attestation: - block.body.attestations.append(attestation) - state_transition(test_state, block) - blocks.append(block) - if epoch == 0: - check_finality(test_state, state, True, False, False) - elif epoch == 1: - check_finality(test_state, state, True, True, False) - elif epoch == 2: + prev_state, blocks, test_state = next_epoch_with_attestations(test_state, True, False) + check_finality(test_state, prev_state, True, False, False) + if epoch == 1: + prev_state, blocks, test_state = next_epoch_with_attestations(test_state, False, False) + check_finality(test_state, prev_state, False, True, False) + if epoch == 2: + prev_state, blocks, test_state = next_epoch_with_attestations(test_state, False, True) # finalized by rule 2 - check_finality(test_state, state, True, True, True) + check_finality(test_state, prev_state, True, False, True) assert test_state.finalized_epoch 
== old_previous_justified_epoch assert test_state.finalized_root == old_previous_justified_root + return state, blocks, test_state + + +def test_finality_rule_3(state): + # get past first two epochs that finality does not run on + next_epoch(state) + next_epoch(state) + + test_state = deepcopy(state) + + blocks = [] + for epoch in range(2): + prev_state = deepcopy(test_state) + for slot in range(spec.SLOTS_PER_EPOCH): + slot_to_attest = test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 + attestation = get_valid_attestation(test_state, slot_to_attest) + fill_aggregate_attestation(test_state, attestation) + block = build_empty_block_for_next_slot(test_state) + block.body.attestations.append(attestation) + state_transition(test_state, block) + + blocks.append(block) + if epoch == 0: + check_finality(test_state, prev_state, True, False, False) + if epoch == 1: + check_finality(test_state, prev_state, True, True, True) + + prev_state = deepcopy(test_state) + next_epoch(test_state) + check_finality(test_state, prev_state, False, True, False) + + + prev_state = deepcopy(test_state) + for slot in range(spec.SLOTS_PER_EPOCH): + slot_to_attest = test_state.slot - spec.SLOTS_PER_EPOCH + 1 + attestation = get_valid_attestation(test_state, slot_to_attest) + fill_aggregate_attestation(test_state, attestation) + block = build_empty_block_for_next_slot(test_state) + block.body.attestations.append(attestation) + state_transition(test_state, block) + + assert len(test_state.previous_epoch_attestations) >= 0 + assert len(test_state.current_epoch_attestations) == 0 + + blocks.append(block) + check_finality(test_state, prev_state, True, False, True) + + + prev_state = deepcopy(test_state) + for slot in range(spec.SLOTS_PER_EPOCH): + prev_slot_to_attest = test_state.slot - spec.SLOTS_PER_EPOCH + 1 + prev_attestation = get_valid_attestation(test_state, prev_slot_to_attest) + fill_aggregate_attestation(test_state, prev_attestation) + + cur_slot_to_attest = test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 + cur_attestation = get_valid_attestation(test_state, cur_slot_to_attest) + fill_aggregate_attestation(test_state, cur_attestation) + + block = build_empty_block_for_next_slot(test_state) + block.body.attestations.append(prev_attestation) + block.body.attestations.append(cur_attestation) + + state_transition(test_state, block) + + assert len(test_state.previous_epoch_attestations) >= 0 + assert len(test_state.current_epoch_attestations) >= 0 + + blocks.append(block) + check_finality(test_state, prev_state, True, True, True) + + return state, blocks, test_state From 5744fef808e5668ad616ca64685d1c85a5e598b3 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 22 Apr 2019 09:18:20 -0600 Subject: [PATCH 432/481] clean up some notes on deposits --- specs/core/0_beacon-chain.md | 2 +- specs/validator/0_beacon-chain-validator.md | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 20d7f5d43..615c66048 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1345,7 +1345,7 @@ For convenience, we provide the interface to the contract here: * `__init__()`: initializes the contract * `get_deposit_root() -> bytes32`: returns the current root of the deposit tree -* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. 
Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei. +* `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei. ## On genesis diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 632bf2b62..966e238ab 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -101,11 +101,10 @@ In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW To submit a deposit: * Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object. -* Let `proof_of_possession` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`. -* Set `deposit_data.proof_of_possession = proof_of_possession`. * Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`. * Set `deposit_data.amount = amount`. -* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit(deposit_input: bytes[512])` along with `serialize(deposit_data)` as the singular `bytes` input along with a deposit of `amount` Gwei. +* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`. +* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei. _Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`. From d648b091b5bd35fce9653ad0e2167edeeb81d45a Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 22 Apr 2019 09:33:46 -0600 Subject: [PATCH 433/481] lint --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 615c66048..0b2ccf028 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1345,7 +1345,7 @@ For convenience, we provide the interface to the contract here: * `__init__()`: initializes the contract * `get_deposit_root() -> bytes32`: returns the current root of the deposit tree -* `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei. 
+* `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])`: adds a deposit instance to the deposit tree, incorporating the input arguments and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants are specified in units of Gwei. ## On genesis From e13cec146661f8f8a47e2de03efb95ecceaeb01f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 22 Apr 2019 10:02:31 -0600 Subject: [PATCH 434/481] increase MAX_TRANSFERS for transfer test --- test_libs/pyspec/tests/test_sanity.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index 3442e8182..3201ba936 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -380,7 +380,10 @@ def test_voluntary_exit(state): return pre_state, [initiate_exit_block, exit_block], post_state -def test_transfer(state): +def test_transfer(state, config): + # overwrite default 0 to test + spec.MAX_TRANSFERS = 1 + pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) sender_index = get_active_validator_indices(pre_state, current_epoch)[-1] From d4a33dbcaa093588c9cca1479bb1487ccd630c63 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 22 Apr 2019 15:29:47 -0600 Subject: [PATCH 435/481] add descriptions of typeof and default functions --- specs/core/1_custody-game.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index 88341ae98..c8625af5b 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -28,6 +28,8 @@ - [`BeaconState`](#beaconstate) - [`BeaconBlockBody`](#beaconblockbody) - [Helpers](#helpers) + - [`typeof`](#typeof) + - [`empty`](#empty) - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count) - [`get_custody_chunk_bit`](#get_custody_chunk_bit) - [`epoch_to_custody_period`](#epoch_to_custody_period) @@ -204,6 +206,14 @@ Add the following fields to the end of the specified container objects. Fields w ## Helpers +### `typeof` + +The `typeof` function accepts and SSZ object as a single input and returns the corresponding SSZ type. + +### `empty` + +The `empty` function accepts and SSZ type as input and returns an object of that type with all fields initialized to default values. + ### `get_crosslink_chunk_count` ```python From 2650a2c0616661f8a5bc2e018df5ac34cd2d3027 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Tue, 23 Apr 2019 07:16:52 -0700 Subject: [PATCH 436/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 6d4f396ae..ccab159f4 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -797,7 +797,7 @@ def get_split_offset(list_size: int, chunks: int, index: int) -> int: ```python def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: """ - Return the number of committees in one epoch. + Return the number of committees at ``epoch``. """ active_validators = get_active_validator_indices(state, epoch) return max( @@ -813,6 +813,9 @@ def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: ```python def get_shard_delta(state: BeaconState, epoch: Epoch) -> int: + """ + Return the minimum number of shards that get processed at ``epoch``. 
+ """ return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH) ``` From 5619e7df9c04cfa74bdcd60cb5a43fb8215a5d64 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 23 Apr 2019 09:21:30 -0600 Subject: [PATCH 437/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ccab159f4..ed2b1403a 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -814,7 +814,7 @@ def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: ```python def get_shard_delta(state: BeaconState, epoch: Epoch) -> int: """ - Return the minimum number of shards that get processed at ``epoch``. + Return the number of shards to increment ``state.shard_shard`` during ``epoch``. """ return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH) ``` From e26112af37efba0c837a986d06c3a35afb3a54ed Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 23 Apr 2019 08:36:40 -0700 Subject: [PATCH 438/481] Update 0_beacon-chain.md typo fix --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index ed2b1403a..184411925 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -814,7 +814,7 @@ def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: ```python def get_shard_delta(state: BeaconState, epoch: Epoch) -> int: """ - Return the number of shards to increment ``state.shard_shard`` during ``epoch``. + Return the number of shards to increment ``state.latest_start_shard`` during ``epoch``. """ return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH) ``` From 5e4afc2dd0cb34af647bad88620395c04e5bbaab Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:49:59 -0500 Subject: [PATCH 439/481] Update rpc-interface.md --- specs/networking/rpc-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md index 5d408b5a0..f1da8f7e3 100644 --- a/specs/networking/rpc-interface.md +++ b/specs/networking/rpc-interface.md @@ -247,7 +247,7 @@ Requests a list of block roots and slots from the peer. The `count` parameter MU Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slots `2` a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot three were further empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks. -The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. 
Clients could, for instance, request every 10th block from a set of peers where each per has a different starting block in order to populate block data. +The function of the `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each peer has a different starting block in order to populate block data. ### Beacon Block Bodies @@ -287,6 +287,6 @@ Requests the `block_bodies` associated with the provided `block_roots` from the **Response Body:** TBD -Requests contain the hashes of Merkle tree nodes that when merkelized yield the block's `state_root`. +Requests contain the hashes of Merkle tree nodes that when merkleized yield the block's `state_root`. The response will contain the values that, when hashed, yield the hashes inside the request body. From d64d97eee78645a67c436887bc5c69cf03b9d821 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:52:06 -0500 Subject: [PATCH 440/481] Update core.md --- specs/test_formats/ssz_static/core.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/test_formats/ssz_static/core.md b/specs/test_formats/ssz_static/core.md index 059f11027..1d470c338 100644 --- a/specs/test_formats/ssz_static/core.md +++ b/specs/test_formats/ssz_static/core.md @@ -1,6 +1,6 @@ # Test format: SSZ static types -The goal of this type is to provide clients with a solid reference how the known SSZ objects should be encoded. +The goal of this type is to provide clients with a solid reference for how the known SSZ objects should be encoded. Each object described in the Phase-0 spec is covered. This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes do not support (or have alternatives for) generic SSZ encoding/decoding. 
@@ -27,6 +27,6 @@ A test-runner can implement the following assertions: ## References -**`serialized`**: [SSZ serialization](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#serialization) -**`root`** - [hash_tree_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#merkleization) function -**`signing_root`** - [signing_root](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/simple-serialize.md#self-signed-containers) function +**`serialized`**: [SSZ serialization](../../simple-serialize.md#serialization) +**`root`** - [hash_tree_root](../../simple-serialize.md#merkleization) function +**`signing_root`** - [signing_root](../../simple-serialize.md#self-signed-containers) function From 47773f0da562bbcf15d533c1b61404c89162005c Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:53:09 -0500 Subject: [PATCH 441/481] Update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index aa5b7e302..b2b369e11 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQs) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). +To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). This repo hosts the current eth2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed upon changes to spec can be made through pull requests. @@ -11,10 +11,10 @@ This repo hosts the current eth2.0 specifications. Discussions about design rati Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are: * [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md) -* [Phase 1 -- Custody game](specs/core/1_custody-game.md) +* [Phase 1 -- Custody Game](specs/core/1_custody-game.md) * [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md) -Accompanying documents can be found in [specs](specs) and include +Accompanying documents can be found in [specs](specs) and include: * [SimpleSerialize (SSZ) spec](specs/simple-serialize.md) * [BLS signature verification](specs/bls_signature.md) * [General test format](specs/test_formats/README.md) From cf1c78b2410a1982bdffaf375987c533c22ab047 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:55:15 -0500 Subject: [PATCH 442/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 184411925..83698a771 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1,6 +1,6 @@ # Ethereum 2.0 Phase 0 -- The Beacon Chain -**NOTICE**: This document is a work in progress for researchers and implementers. 
It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](#ref-python-poc). +**NOTICE**: This document is a work in progress for researchers and implementers. It reflects recent spec changes and takes precedence over the Python proof-of-concept implementation [[python-poc]](https://github.com/ethereum/beacon_chain). ## Table of contents From afbec6851261561086bad312fbc41b2d7f2b58a3 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:57:15 -0500 Subject: [PATCH 443/481] Update README.md --- test_libs/pyspec/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index 20c01bde4..b9bf86220 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -58,4 +58,4 @@ The pyspec is not a replacement. ## License -Same as the spec itself, see LICENSE file in spec repository root. +Same as the spec itself, see [LICENSE](../../LICENSE) file in spec repository root. From f164702b704a296c29b05e45251a5749f6cd101f Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:59:15 -0500 Subject: [PATCH 444/481] Update README.md --- specs/test_formats/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md index da2e38c01..273659ce9 100644 --- a/specs/test_formats/README.md +++ b/specs/test_formats/README.md @@ -118,7 +118,7 @@ Separation of configuration and tests aims to: Note: Some clients prefer compile-time constants and optimizations. They should compile for each configuration once, and run the corresponding tests per build target. -The format is described in `configs/constant_presets`. +The format is described in [`configs/constant_presets`](../../configs/constant_presets/README.md#format). ## Fork-timeline @@ -129,7 +129,7 @@ A fork timeline is (preferably) loaded in as a configuration object into a clien - we may decide on an epoch number for a fork based on external events (e.g. Eth1 log event), a client should be able to activate a fork dynamically. -The format is described in `configs/fork_timelines`. +The format is described in [`configs/fork_timelines`](../../configs/fork_timelines/README.md#format). ## Config sourcing From 1d59b335904a3b0f0c8952d129a2c98bec26d8ce Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:59:49 -0500 Subject: [PATCH 445/481] Update README.md --- test_generators/ssz_static/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_generators/ssz_static/README.md b/test_generators/ssz_static/README.md index d73556e1b..453d6d0e5 100644 --- a/test_generators/ssz_static/README.md +++ b/test_generators/ssz_static/README.md @@ -1,6 +1,6 @@ # SSZ-static The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: - the serialization and hashing of ETH 2.0 data types + the serialization and hashing of ETH 2.0 data types. Test-format documentation can be found [here](../../specs/test_formats/ssz_static/README.md). 
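The ssz_static test-format and generator patches above repeatedly reference the `serialized`, `root`, and `signing_root` fields of a test case. As a point of reference for how a consumer of those vectors might use them, below is a minimal, illustrative sketch rather than the normative test runner. It deliberately takes the serialization and merkleization helpers as parameters instead of assuming a particular pyspec module layout, and it assumes the case is a plain dict of `0x`-prefixed hex strings as described in the test-format documents.

```python
from typing import Callable, Optional


def check_ssz_static_case(obj: object,
                          case: dict,
                          serialize: Callable[[object], bytes],
                          hash_tree_root: Callable[[object], bytes],
                          signing_root: Optional[Callable[[object], bytes]] = None) -> None:
    """Illustrative check of one ssz_static-style test case (assumed layout)."""
    def unhex(value: str) -> bytes:
        # Byte(s) fields are hex encoded and prefixed with '0x'
        assert value.startswith('0x')
        return bytes.fromhex(value[2:])

    # Serialization must match byte-for-byte
    assert serialize(obj) == unhex(case['serialized'])
    # Merkleization (hash_tree_root) must match
    assert hash_tree_root(obj) == unhex(case['root'])
    # Only self-signed containers carry a signing_root
    if signing_root is not None and 'signing_root' in case:
        assert signing_root(obj) == unhex(case['signing_root'])
```

Passing the helpers in explicitly keeps the sketch independent of whether a client exposes them as free functions or as methods; a pyspec-based caller could, for instance, supply `lambda o: o.serialize()` for the first helper.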
From 2048b657b62b568d881b562671bc314152b13b7c Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 12:59:58 -0500 Subject: [PATCH 446/481] Update sign_msg.md --- specs/test_formats/bls/sign_msg.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/bls/sign_msg.md b/specs/test_formats/bls/sign_msg.md index dd93174f2..9916f2cc2 100644 --- a/specs/test_formats/bls/sign_msg.md +++ b/specs/test_formats/bls/sign_msg.md @@ -12,7 +12,7 @@ input: output: bytes96 -- expected signature ``` -All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. ## Condition From dbcac289c80e719b67e9e885feef756cd184d05a Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 13:00:05 -0500 Subject: [PATCH 447/481] Update priv_to_pub.md --- specs/test_formats/bls/priv_to_pub.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/bls/priv_to_pub.md b/specs/test_formats/bls/priv_to_pub.md index 7af148d0f..ef62241ae 100644 --- a/specs/test_formats/bls/priv_to_pub.md +++ b/specs/test_formats/bls/priv_to_pub.md @@ -9,7 +9,7 @@ input: bytes32 -- the private key output: bytes48 -- the public key ``` -All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. ## Condition From babf2721c7ef517673e1f3497899ed36aa8d6259 Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 13:00:15 -0500 Subject: [PATCH 448/481] Update msg_hash_g2_uncompressed.md --- specs/test_formats/bls/msg_hash_g2_uncompressed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/bls/msg_hash_g2_uncompressed.md b/specs/test_formats/bls/msg_hash_g2_uncompressed.md index f42ea9998..792fe1f03 100644 --- a/specs/test_formats/bls/msg_hash_g2_uncompressed.md +++ b/specs/test_formats/bls/msg_hash_g2_uncompressed.md @@ -11,7 +11,7 @@ input: output: List[List[bytes48]] -- 3 lists, each a length of two ``` -All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. ## Condition From 58c50c2f08af4ca83619d6ff21270df471984c5a Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 13:00:25 -0500 Subject: [PATCH 449/481] Update msg_hash_g2_compressed.md --- specs/test_formats/bls/msg_hash_g2_compressed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/test_formats/bls/msg_hash_g2_compressed.md b/specs/test_formats/bls/msg_hash_g2_compressed.md index 4e194e90b..2feeb92ba 100644 --- a/specs/test_formats/bls/msg_hash_g2_compressed.md +++ b/specs/test_formats/bls/msg_hash_g2_compressed.md @@ -11,7 +11,7 @@ input: output: List[bytes48] -- length of two ``` -All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x` +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. 
## Condition From b6a085d0d7555c7ec3818b557930cd6087361f9d Mon Sep 17 00:00:00 2001 From: JSON <49416440+JSON@users.noreply.github.com> Date: Tue, 23 Apr 2019 13:01:21 -0500 Subject: [PATCH 450/481] Update bls_signature.md --- specs/bls_signature.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/bls_signature.md b/specs/bls_signature.md index beef19df5..18e2d8c9a 100644 --- a/specs/bls_signature.md +++ b/specs/bls_signature.md @@ -86,7 +86,7 @@ def hash_to_G2(message_hash: Bytes32, domain: uint64) -> [uint384]: ### `modular_squareroot` -`modular_squareroot(x)` returns a solution `y` to `y**2 % q == x`, and `None` if none exists. If there are two solutions the one with higher imaginary component is favored; if both solutions have equal imaginary component the one with higher real component is favored (note that this is equivalent to saying that the single solution with either imaginary component > p/2 or imaginary component zero and real component > p/2 is favored). +`modular_squareroot(x)` returns a solution `y` to `y**2 % q == x`, and `None` if none exists. If there are two solutions, the one with higher imaginary component is favored; if both solutions have equal imaginary component, the one with higher real component is favored (note that this is equivalent to saying that the single solution with either imaginary component > p/2 or imaginary component zero and real component > p/2 is favored). The following is a sample implementation; implementers are free to implement modular square roots as they wish. Note that `x2 = -x1` is an _additive modular inverse_ so real and imaginary coefficients remain in `[0 .. q-1]`. `coerce_to_int(element: Fq) -> int` is a function that takes Fq element `element` (i.e. integers `mod q`) and converts it to a regular integer. 
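The `modular_squareroot` note above specifies which of the two candidate square roots is returned: the one with the higher imaginary component, falling back to the higher real component on a tie. As a small illustration of just that selection rule, here is a sketch over plain integer coefficient pairs. Representing an Fq2 element as a `(real, imaginary)` tuple of integers in `[0, q-1]` is an assumption made for readability here; the spec's own sample implementation operates on field elements and uses `coerce_to_int` as described above.

```python
from typing import Optional, Tuple

Fq2Coeffs = Tuple[int, int]  # (real, imaginary), each reduced into [0, q-1]


def favored_root(candidate: Optional[Fq2Coeffs], q: int) -> Optional[Fq2Coeffs]:
    """Return the favored root among ``candidate`` and its additive inverse."""
    if candidate is None:
        return None  # no square root exists
    real, imag = candidate
    # The second root is the additive modular inverse, so both coefficients
    # stay in [0, q-1].
    other = ((q - real) % q, (q - imag) % q)
    # Favor the higher imaginary component, then the higher real component.
    # (A tie on the imaginary part can only happen when it is zero, since q is odd.)
    return max(candidate, other, key=lambda c: (c[1], c[0]))
```

For example, with `q = 7` a candidate `(3, 2)` has additive inverse `(4, 5)`, and `(4, 5)` is favored because its imaginary component is larger, consistent with the equivalent "imaginary component > p/2" formulation quoted above.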
From bd0552c796b903c18c7690415c3391898528e5ae Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 23 Apr 2019 13:32:41 -0600 Subject: [PATCH 451/481] fix finality tests for each rule --- specs/core/0_beacon-chain.md | 8 - test_libs/pyspec/tests/test_finality.py | 165 +++++--------- test_libs/pyspec/tests/test_sanity.py | 27 --- tests/phase0/__init__.py | 0 .../test_process_block_header.py | 60 ------ .../block_processing/test_process_deposit.py | 140 ------------ .../block_processing/test_voluntary_exit.py | 175 --------------- tests/phase0/helpers.py | 203 ------------------ 8 files changed, 52 insertions(+), 726 deletions(-) delete mode 100644 tests/phase0/__init__.py delete mode 100644 tests/phase0/block_processing/test_process_block_header.py delete mode 100644 tests/phase0/block_processing/test_process_deposit.py delete mode 100644 tests/phase0/block_processing/test_voluntary_exit.py delete mode 100644 tests/phase0/helpers.py diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 171a120d0..084bec844 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1564,16 +1564,12 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.previous_justified_root = state.current_justified_root state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) - print(previous_boundary_attesting_balance) if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: - print("prev success") state.current_justified_epoch = get_previous_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 1) current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) - print(current_boundary_attesting_balance) if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: - print("cur success") state.current_justified_epoch = get_current_epoch(state) state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) state.justification_bitfield |= (1 << 0) @@ -1583,22 +1579,18 @@ def process_justification_and_finalization(state: BeaconState) -> None: current_epoch = get_current_epoch(state) # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3: - print("rule 1") state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2: - print("rule 2") state.finalized_epoch = old_previous_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2: - print("rule 3") state.finalized_epoch = old_current_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source if (bitfield >> 0) % 4 == 0b11 and 
old_current_justified_epoch == current_epoch - 1: - print("rule 4") state.finalized_epoch = old_current_justified_epoch state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) ``` diff --git a/test_libs/pyspec/tests/test_finality.py b/test_libs/pyspec/tests/test_finality.py index ae8ba13ff..8c1b4e871 100644 --- a/test_libs/pyspec/tests/test_finality.py +++ b/test_libs/pyspec/tests/test_finality.py @@ -53,35 +53,23 @@ def next_epoch_with_attestations(state, post_state = deepcopy(state) blocks = [] for slot in range(spec.SLOTS_PER_EPOCH): - print("slot: %s", post_state.slot) block = build_empty_block_for_next_slot(post_state) - if fill_prev_epoch: - print("prev") - slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 - prev_attestation = get_valid_attestation(post_state, slot_to_attest) - block.body.attestations.append(prev_attestation) - if fill_cur_epoch: - print("cur") slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)): cur_attestation = get_valid_attestation(post_state, slot_to_attest) fill_aggregate_attestation(post_state, cur_attestation) block.body.attestations.append(cur_attestation) + if fill_prev_epoch: + slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 + prev_attestation = get_valid_attestation(post_state, slot_to_attest) + fill_aggregate_attestation(post_state, prev_attestation) + block.body.attestations.append(prev_attestation) + state_transition(post_state, block) blocks.append(block) - # if fill_prev_epoch: - # assert len(post_state.previous_epoch_attestations) >= 0 - # else: - # assert len(post_state.previous_epoch_attestations) == 0 - - # if fill_cur_epoch: - # assert len(post_state.current_epoch_attestations) >= 0 - # else: - # assert len(post_state.current_epoch_attestations) == 0 - return state, blocks, post_state @@ -90,20 +78,8 @@ def test_finality_from_genesis_rule_4(state): blocks = [] for epoch in range(6): - prev_state = deepcopy(test_state) - old_current_justified_epoch = test_state.current_justified_epoch - old_current_justified_root = test_state.current_justified_root - for slot in range(spec.SLOTS_PER_EPOCH): - attestation = None - slot_to_attest = test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - if slot_to_attest >= spec.GENESIS_SLOT: - attestation = get_valid_attestation(test_state, slot_to_attest) - fill_aggregate_attestation(test_state, attestation) - block = build_empty_block_for_next_slot(test_state) - if attestation: - block.body.attestations.append(attestation) - state_transition(test_state, block) - blocks.append(block) + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) + blocks += new_blocks if epoch == 0: check_finality(test_state, prev_state, False, False, False) @@ -114,8 +90,8 @@ def test_finality_from_genesis_rule_4(state): elif epoch >= 3: # rule 4 of finaliy check_finality(test_state, prev_state, True, True, True) - assert test_state.finalized_epoch == old_current_justified_epoch - assert test_state.finalized_root == old_current_justified_root + assert test_state.finalized_epoch == prev_state.current_justified_epoch + assert test_state.finalized_root == prev_state.current_justified_root return state, blocks, test_state @@ -125,25 +101,13 @@ def test_finality_rule_1(state): next_epoch(state) next_epoch(state) + pre_state = deepcopy(state) test_state = deepcopy(state) blocks = [] for epoch in range(3): - prev_state = deepcopy(test_state) - 
old_previous_justified_epoch = test_state.previous_justified_epoch - old_previous_justified_root = test_state.previous_justified_root - for slot in range(spec.SLOTS_PER_EPOCH): - slot_to_attest = test_state.slot - spec.SLOTS_PER_EPOCH + 1 - attestation = get_valid_attestation(test_state, slot_to_attest) - fill_aggregate_attestation(test_state, attestation) - block = build_empty_block_for_next_slot(test_state) - block.body.attestations.append(attestation) - state_transition(test_state, block) - - assert len(test_state.previous_epoch_attestations) >= 0 - assert len(test_state.current_epoch_attestations) == 0 - - blocks.append(block) + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True) + blocks += new_blocks if epoch == 0: check_finality(test_state, prev_state, True, False, False) @@ -152,10 +116,10 @@ def test_finality_rule_1(state): elif epoch == 2: # finalized by rule 1 check_finality(test_state, prev_state, True, True, True) - assert test_state.finalized_epoch == old_previous_justified_epoch - assert test_state.finalized_root == old_previous_justified_root + assert test_state.finalized_epoch == prev_state.previous_justified_epoch + assert test_state.finalized_root == prev_state.previous_justified_root - return state, blocks, test_state + return pre_state, blocks, test_state def test_finality_rule_2(state): @@ -163,6 +127,7 @@ def test_finality_rule_2(state): next_epoch(state) next_epoch(state) + pre_state = deepcopy(state) test_state = deepcopy(state) blocks = [] @@ -170,85 +135,59 @@ def test_finality_rule_2(state): old_previous_justified_epoch = test_state.previous_justified_epoch old_previous_justified_root = test_state.previous_justified_root if epoch == 0: - prev_state, blocks, test_state = next_epoch_with_attestations(test_state, True, False) + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) check_finality(test_state, prev_state, True, False, False) if epoch == 1: - prev_state, blocks, test_state = next_epoch_with_attestations(test_state, False, False) + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False) check_finality(test_state, prev_state, False, True, False) if epoch == 2: - prev_state, blocks, test_state = next_epoch_with_attestations(test_state, False, True) + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True) # finalized by rule 2 check_finality(test_state, prev_state, True, False, True) assert test_state.finalized_epoch == old_previous_justified_epoch assert test_state.finalized_root == old_previous_justified_root - return state, blocks, test_state + + blocks += new_blocks + + return pre_state, blocks, test_state def test_finality_rule_3(state): + """ + Test scenario described here + https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892 + """ + # get past first two epochs that finality does not run on next_epoch(state) next_epoch(state) + pre_state = deepcopy(state) test_state = deepcopy(state) blocks = [] - for epoch in range(2): - prev_state = deepcopy(test_state) - for slot in range(spec.SLOTS_PER_EPOCH): - slot_to_attest = test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - attestation = get_valid_attestation(test_state, slot_to_attest) - fill_aggregate_attestation(test_state, attestation) - block = build_empty_block_for_next_slot(test_state) - block.body.attestations.append(attestation) - state_transition(test_state, block) + prev_state, new_blocks, test_state = 
next_epoch_with_attestations(test_state, True, False) + blocks += new_blocks + check_finality(test_state, prev_state, True, False, False) - blocks.append(block) - if epoch == 0: - check_finality(test_state, prev_state, True, False, False) - if epoch == 1: - check_finality(test_state, prev_state, True, True, True) - - prev_state = deepcopy(test_state) - next_epoch(test_state) - check_finality(test_state, prev_state, False, True, False) - - - prev_state = deepcopy(test_state) - for slot in range(spec.SLOTS_PER_EPOCH): - slot_to_attest = test_state.slot - spec.SLOTS_PER_EPOCH + 1 - attestation = get_valid_attestation(test_state, slot_to_attest) - fill_aggregate_attestation(test_state, attestation) - block = build_empty_block_for_next_slot(test_state) - block.body.attestations.append(attestation) - state_transition(test_state, block) - - assert len(test_state.previous_epoch_attestations) >= 0 - assert len(test_state.current_epoch_attestations) == 0 - - blocks.append(block) - check_finality(test_state, prev_state, True, False, True) - - - prev_state = deepcopy(test_state) - for slot in range(spec.SLOTS_PER_EPOCH): - prev_slot_to_attest = test_state.slot - spec.SLOTS_PER_EPOCH + 1 - prev_attestation = get_valid_attestation(test_state, prev_slot_to_attest) - fill_aggregate_attestation(test_state, prev_attestation) - - cur_slot_to_attest = test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 - cur_attestation = get_valid_attestation(test_state, cur_slot_to_attest) - fill_aggregate_attestation(test_state, cur_attestation) - - block = build_empty_block_for_next_slot(test_state) - block.body.attestations.append(prev_attestation) - block.body.attestations.append(cur_attestation) - - state_transition(test_state, block) - - assert len(test_state.previous_epoch_attestations) >= 0 - assert len(test_state.current_epoch_attestations) >= 0 - - blocks.append(block) + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) + blocks += new_blocks check_finality(test_state, prev_state, True, True, True) - return state, blocks, test_state + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False) + blocks += new_blocks + check_finality(test_state, prev_state, False, True, False) + + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True) + blocks += new_blocks + # rule 2 + check_finality(test_state, prev_state, True, False, True) + + prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, True) + blocks += new_blocks + # rule 3 + check_finality(test_state, prev_state, True, True, True) + assert test_state.finalized_epoch == prev_state.current_justified_epoch + assert test_state.finalized_root == prev_state.current_justified_root + + return pre_state, blocks, test_state diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index f0476f1c6..f330c2b19 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -53,33 +53,6 @@ from .helpers import ( pytestmark = pytest.mark.sanity -def check_finality(state, - prev_state, - current_justified_changed, - previous_justified_changed, - finalized_changed): - if current_justified_changed: - assert state.current_justified_epoch > prev_state.current_justified_epoch - assert state.current_justified_root != prev_state.current_justified_root - else: - assert state.current_justified_epoch == prev_state.current_justified_epoch - assert state.current_justified_root == 
prev_state.current_justified_root - - if previous_justified_changed: - assert state.previous_justified_epoch > prev_state.previous_justified_epoch - assert state.previous_justified_root != prev_state.previous_justified_root - else: - assert state.previous_justified_epoch == prev_state.previous_justified_epoch - assert state.previous_justified_root == prev_state.previous_justified_root - - if finalized_changed: - assert state.finalized_epoch > prev_state.finalized_epoch - assert state.finalized_root != prev_state.finalized_root - else: - assert state.finalized_epoch == prev_state.finalized_epoch - assert state.finalized_root == prev_state.finalized_root - - def test_slot_transition(state): test_state = deepcopy(state) cache_state(test_state) diff --git a/tests/phase0/__init__.py b/tests/phase0/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/tests/phase0/block_processing/test_process_block_header.py b/tests/phase0/block_processing/test_process_block_header.py deleted file mode 100644 index 241466437..000000000 --- a/tests/phase0/block_processing/test_process_block_header.py +++ /dev/null @@ -1,60 +0,0 @@ -from copy import deepcopy -import pytest - - -from build.phase0.spec import ( - get_beacon_proposer_index, - cache_state, - advance_slot, - process_block_header, -) -from tests.phase0.helpers import ( - build_empty_block_for_next_slot, -) - -# mark entire file as 'header' -pytestmark = pytest.mark.header - - -def prepare_state_for_header_processing(state): - cache_state(state) - advance_slot(state) - - -def run_block_header_processing(state, block, valid=True): - """ - Run ``process_block_header`` returning the pre and post state. - If ``valid == False``, run expecting ``AssertionError`` - """ - prepare_state_for_header_processing(state) - post_state = deepcopy(state) - - if not valid: - with pytest.raises(AssertionError): - process_block_header(post_state, block) - return state, None - - process_block_header(post_state, block) - return state, post_state - - -def test_success(state): - block = build_empty_block_for_next_slot(state) - pre_state, post_state = run_block_header_processing(state, block) - return state, block, post_state - - -def test_invalid_slot(state): - block = build_empty_block_for_next_slot(state) - block.slot = state.slot + 2 # invalid slot - - pre_state, post_state = run_block_header_processing(state, block, valid=False) - return pre_state, block, None - - -def test_invalid_previous_block_root(state): - block = build_empty_block_for_next_slot(state) - block.previous_block_root = b'\12'*32 # invalid prev root - - pre_state, post_state = run_block_header_processing(state, block, valid=False) - return pre_state, block, None diff --git a/tests/phase0/block_processing/test_process_deposit.py b/tests/phase0/block_processing/test_process_deposit.py deleted file mode 100644 index 0c5770195..000000000 --- a/tests/phase0/block_processing/test_process_deposit.py +++ /dev/null @@ -1,140 +0,0 @@ -from copy import deepcopy -import pytest - -import build.phase0.spec as spec - -from build.phase0.spec import ( - ZERO_HASH, - process_deposit, -) -from tests.phase0.helpers import ( - build_deposit, - privkeys, - pubkeys, -) - - -# mark entire file as 'voluntary_exits' -pytestmark = pytest.mark.voluntary_exits - - -def test_success(state): - pre_state = deepcopy(state) - # fill previous deposits with zero-hash - deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) - - index = len(deposit_data_leaves) - pubkey = pubkeys[index] - privkey = 
privkeys[index] - deposit, root, deposit_data_leaves = build_deposit( - pre_state, - deposit_data_leaves, - pubkey, - privkey, - spec.MAX_DEPOSIT_AMOUNT, - ) - - pre_state.latest_eth1_data.deposit_root = root - pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - - post_state = deepcopy(pre_state) - - process_deposit(post_state, deposit) - - assert len(post_state.validator_registry) == len(state.validator_registry) + 1 - assert len(post_state.validator_balances) == len(state.validator_balances) + 1 - assert post_state.validator_registry[index].pubkey == pubkeys[index] - assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count - - return pre_state, deposit, post_state - - -def test_success_top_up(state): - pre_state = deepcopy(state) - deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) - - validator_index = 0 - amount = spec.MAX_DEPOSIT_AMOUNT // 4 - pubkey = pubkeys[validator_index] - privkey = privkeys[validator_index] - deposit, root, deposit_data_leaves = build_deposit( - pre_state, - deposit_data_leaves, - pubkey, - privkey, - amount, - ) - - pre_state.latest_eth1_data.deposit_root = root - pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - pre_balance = pre_state.validator_balances[validator_index] - - post_state = deepcopy(pre_state) - - process_deposit(post_state, deposit) - - assert len(post_state.validator_registry) == len(state.validator_registry) - assert len(post_state.validator_balances) == len(state.validator_balances) - assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count - assert post_state.validator_balances[validator_index] == pre_balance + amount - - return pre_state, deposit, post_state - - -def test_wrong_index(state): - pre_state = deepcopy(state) - deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) - - - index = len(deposit_data_leaves) - pubkey = pubkeys[index] - privkey = privkeys[index] - deposit, root, deposit_data_leaves = build_deposit( - pre_state, - deposit_data_leaves, - pubkey, - privkey, - spec.MAX_DEPOSIT_AMOUNT, - ) - - # mess up deposit_index - deposit.index = pre_state.deposit_index + 1 - - pre_state.latest_eth1_data.deposit_root = root - pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - - post_state = deepcopy(pre_state) - - with pytest.raises(AssertionError): - process_deposit(post_state, deposit) - - return pre_state, deposit, None - - -def test_bad_merkle_proof(state): - pre_state = deepcopy(state) - deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry) - - index = len(deposit_data_leaves) - pubkey = pubkeys[index] - privkey = privkeys[index] - deposit, root, deposit_data_leaves = build_deposit( - pre_state, - deposit_data_leaves, - pubkey, - privkey, - spec.MAX_DEPOSIT_AMOUNT, - ) - - # mess up merkle branch - deposit.proof[-1] = spec.ZERO_HASH - - pre_state.latest_eth1_data.deposit_root = root - pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves) - - post_state = deepcopy(pre_state) - - with pytest.raises(AssertionError): - process_deposit(post_state, deposit) - - return pre_state, deposit, None diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py deleted file mode 100644 index 6adc81464..000000000 --- a/tests/phase0/block_processing/test_voluntary_exit.py +++ /dev/null @@ -1,175 +0,0 @@ -from copy import deepcopy -import pytest - -import build.phase0.spec as spec - -from build.phase0.spec import ( - 
get_active_validator_indices, - get_current_epoch, - process_voluntary_exit, -) -from tests.phase0.helpers import ( - build_voluntary_exit, - pubkey_to_privkey, -) - - -# mark entire file as 'voluntary_exits' -pytestmark = pytest.mark.voluntary_exits - - -def test_success(state): - pre_state = deepcopy(state) - # - # setup pre_state - # - # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit - pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - - # - # build voluntary exit - # - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] - - voluntary_exit = build_voluntary_exit( - pre_state, - current_epoch, - validator_index, - privkey, - ) - - post_state = deepcopy(pre_state) - - # - # test valid exit - # - process_voluntary_exit(post_state, voluntary_exit) - - assert not pre_state.validator_registry[validator_index].initiated_exit - assert post_state.validator_registry[validator_index].initiated_exit - - return pre_state, voluntary_exit, post_state - - -def test_validator_not_active(state): - pre_state = deepcopy(state) - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] - - # - # setup pre_state - # - pre_state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH - - # - # build and test voluntary exit - # - voluntary_exit = build_voluntary_exit( - pre_state, - current_epoch, - validator_index, - privkey, - ) - - with pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None - - -def test_validator_already_exited(state): - pre_state = deepcopy(state) - # - # setup pre_state - # - # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit - pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] - - # but validator already has exited - pre_state.validator_registry[validator_index].exit_epoch = current_epoch + 2 - - # - # build voluntary exit - # - voluntary_exit = build_voluntary_exit( - pre_state, - current_epoch, - validator_index, - privkey, - ) - - with pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None - - -def test_validator_already_initiated_exit(state): - pre_state = deepcopy(state) - # - # setup pre_state - # - # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit - pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] - - # but validator already has initiated exit - pre_state.validator_registry[validator_index].initiated_exit = True - - # - # build voluntary exit - # - voluntary_exit = build_voluntary_exit( - pre_state, - current_epoch, - validator_index, - privkey, - ) - - with 
pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None - - -def test_validator_not_active_long_enough(state): - pre_state = deepcopy(state) - # - # setup pre_state - # - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] - privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey] - - # but validator already has initiated exit - pre_state.validator_registry[validator_index].initiated_exit = True - - # - # build voluntary exit - # - voluntary_exit = build_voluntary_exit( - pre_state, - current_epoch, - validator_index, - privkey, - ) - - assert ( - current_epoch - pre_state.validator_registry[validator_index].activation_epoch < - spec.PERSISTENT_COMMITTEE_PERIOD - ) - - with pytest.raises(AssertionError): - process_voluntary_exit(pre_state, voluntary_exit) - - return pre_state, voluntary_exit, None diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py deleted file mode 100644 index 20054b821..000000000 --- a/tests/phase0/helpers.py +++ /dev/null @@ -1,203 +0,0 @@ -from copy import deepcopy - -from py_ecc import bls - -import build.phase0.spec as spec -from build.phase0.utils.minimal_ssz import signed_root -from build.phase0.spec import ( - # constants - EMPTY_SIGNATURE, - # SSZ - AttestationData, - Deposit, - DepositInput, - DepositData, - Eth1Data, - VoluntaryExit, - # functions - get_block_root, - get_current_epoch, - get_domain, - get_empty_block, - get_epoch_start_slot, - get_genesis_beacon_state, - verify_merkle_branch, - hash, -) -from build.phase0.utils.merkle_minimal import ( - calc_merkle_tree_from_leaves, - get_merkle_proof, - get_merkle_root, -) - - -privkeys = [i + 1 for i in range(1000)] -pubkeys = [bls.privtopub(privkey) for privkey in privkeys] -pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} - - -def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None): - if not deposit_data_leaves: - deposit_data_leaves = [] - deposit_timestamp = 0 - proof_of_possession = b'\x33' * 96 - - deposit_data_list = [] - for i in range(num_validators): - pubkey = pubkeys[i] - deposit_data = DepositData( - amount=spec.MAX_DEPOSIT_AMOUNT, - timestamp=deposit_timestamp, - deposit_input=DepositInput( - pubkey=pubkey, - # insecurely use pubkey as withdrawal key as well - withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], - proof_of_possession=proof_of_possession, - ), - ) - item = hash(deposit_data.serialize()) - deposit_data_leaves.append(item) - tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) - root = get_merkle_root((tuple(deposit_data_leaves))) - proof = list(get_merkle_proof(tree, item_index=i)) - assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root) - deposit_data_list.append(deposit_data) - - genesis_validator_deposits = [] - for i in range(num_validators): - genesis_validator_deposits.append(Deposit( - proof=list(get_merkle_proof(tree, item_index=i)), - index=i, - deposit_data=deposit_data_list[i] - )) - return genesis_validator_deposits, root - - -def create_genesis_state(num_validators, deposit_data_leaves=None): - initial_deposits, deposit_root = create_mock_genesis_validator_deposits( - num_validators, - deposit_data_leaves, - ) - return get_genesis_beacon_state( - initial_deposits, - genesis_time=0, - genesis_eth1_data=Eth1Data( - deposit_root=deposit_root, - 
deposit_count=len(initial_deposits), - block_hash=spec.ZERO_HASH, - ), - ) - - -def force_registry_change_at_next_epoch(state): - # artificially trigger registry update at next epoch transition - state.finalized_epoch = get_current_epoch(state) - 1 - for crosslink in state.latest_crosslinks: - crosslink.epoch = state.finalized_epoch - state.validator_registry_update_epoch = state.finalized_epoch - 1 - - -def build_empty_block_for_next_slot(state): - empty_block = get_empty_block() - empty_block.slot = state.slot + 1 - previous_block_header = deepcopy(state.latest_block_header) - if previous_block_header.state_root == spec.ZERO_HASH: - previous_block_header.state_root = state.hash_tree_root() - empty_block.previous_block_root = signed_root(previous_block_header) - return empty_block - - -def build_deposit_data(state, pubkey, privkey, amount): - deposit_input = DepositInput( - pubkey=pubkey, - # insecurely use pubkey as withdrawal key as well - withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:], - proof_of_possession=EMPTY_SIGNATURE, - ) - proof_of_possession = bls.sign( - message_hash=signed_root(deposit_input), - privkey=privkey, - domain=get_domain( - state.fork, - get_current_epoch(state), - spec.DOMAIN_DEPOSIT, - ) - ) - deposit_input.proof_of_possession = proof_of_possession - deposit_data = DepositData( - amount=amount, - timestamp=0, - deposit_input=deposit_input, - ) - return deposit_data - - -def build_attestation_data(state, slot, shard): - assert state.slot >= slot - - block_root = build_empty_block_for_next_slot(state).previous_block_root - - epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - if epoch_start_slot == slot: - epoch_boundary_root = block_root - else: - get_block_root(state, epoch_start_slot) - - if slot < epoch_start_slot: - justified_block_root = state.previous_justified_root - else: - justified_block_root = state.current_justified_root - - return AttestationData( - slot=slot, - shard=shard, - beacon_block_root=block_root, - source_epoch=state.current_justified_epoch, - source_root=justified_block_root, - target_root=epoch_boundary_root, - crosslink_data_root=spec.ZERO_HASH, - previous_crosslink=deepcopy(state.latest_crosslinks[shard]), - ) - - -def build_voluntary_exit(state, epoch, validator_index, privkey): - voluntary_exit = VoluntaryExit( - epoch=epoch, - validator_index=validator_index, - signature=EMPTY_SIGNATURE, - ) - voluntary_exit.signature = bls.sign( - message_hash=signed_root(voluntary_exit), - privkey=privkey, - domain=get_domain( - fork=state.fork, - epoch=epoch, - domain_type=spec.DOMAIN_VOLUNTARY_EXIT, - ) - ) - - return voluntary_exit - - -def build_deposit(state, - deposit_data_leaves, - pubkey, - privkey, - amount): - deposit_data = build_deposit_data(state, pubkey, privkey, amount) - - item = hash(deposit_data.serialize()) - index = len(deposit_data_leaves) - deposit_data_leaves.append(item) - tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) - root = get_merkle_root((tuple(deposit_data_leaves))) - proof = list(get_merkle_proof(tree, item_index=index)) - assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root) - - deposit = Deposit( - proof=list(proof), - index=index, - deposit_data=deposit_data, - ) - - return deposit, root, deposit_data_leaves From 53eb2d0368cd4df7692250bce88376655e956d43 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 23 Apr 2019 13:38:35 -0600 Subject: [PATCH 452/481] remove jsonize --- utils/phase0/jsonize.py | 52 
----------------------------------------- 1 file changed, 52 deletions(-) delete mode 100644 utils/phase0/jsonize.py diff --git a/utils/phase0/jsonize.py b/utils/phase0/jsonize.py deleted file mode 100644 index 816192ec6..000000000 --- a/utils/phase0/jsonize.py +++ /dev/null @@ -1,52 +0,0 @@ -from .minimal_ssz import hash_tree_root - - -def jsonize(value, typ, include_hash_tree_roots=False): - if isinstance(typ, str) and typ[:4] == 'uint': - return value - elif typ == 'bool': - assert value in (True, False) - return value - elif isinstance(typ, list): - return [jsonize(element, typ[0], include_hash_tree_roots) for element in value] - elif isinstance(typ, str) and typ[:4] == 'byte': - return '0x' + value.hex() - elif hasattr(typ, 'fields'): - ret = {} - for field, subtype in typ.fields.items(): - ret[field] = jsonize(getattr(value, field), subtype, include_hash_tree_roots) - if include_hash_tree_roots: - ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() - if include_hash_tree_roots: - ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() - return ret - else: - print(value, typ) - raise Exception("Type not recognized") - - -def dejsonize(json, typ): - if isinstance(typ, str) and typ[:4] == 'uint': - return json - elif typ == 'bool': - assert json in (True, False) - return json - elif isinstance(typ, list): - return [dejsonize(element, typ[0]) for element in json] - elif isinstance(typ, str) and typ[:4] == 'byte': - return bytes.fromhex(json[2:]) - elif hasattr(typ, 'fields'): - temp = {} - for field, subtype in typ.fields.items(): - temp[field] = dejsonize(json[field], subtype) - if field + "_hash_tree_root" in json: - assert(json[field + "_hash_tree_root"][2:] == - hash_tree_root(temp[field], subtype).hex()) - ret = typ(**temp) - if "hash_tree_root" in json: - assert(json["hash_tree_root"][2:] == - hash_tree_root(ret, typ).hex()) - return ret - else: - print(json, typ) - raise Exception("Type not recognized") From f2d885f0d8e652ab48aa9dfb91305f7e15d65e3c Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 24 Apr 2019 14:23:51 +1000 Subject: [PATCH 453/481] Address Danny's comments --- specs/core/0_beacon-chain.md | 125 ++++++++++++++++++----------------- 1 file changed, 63 insertions(+), 62 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 931511b36..f61eb2ec5 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -66,8 +66,8 @@ - [`get_shard_delta`](#get_shard_delta) - [`compute_committee`](#compute_committee) - [`get_crosslink_committees_at_slot`](#get_crosslink_committees_at_slot) + - [`get_block_root_at_slot`](#get_block_root_at_slot) - [`get_block_root`](#get_block_root) - - [`get_state_root`](#get_state_root) - [`get_randao_mix`](#get_randao_mix) - [`get_active_index_root`](#get_active_index_root) - [`generate_seed`](#generate_seed) @@ -416,6 +416,8 @@ The types are defined topologically to aid in facilitating an executable version 'data': AttestationData, # Inclusion slot 'inclusion_slot': 'uint64', + # Proposer index + 'proposer_index': 'uint64', } ``` @@ -678,6 +680,7 @@ def get_epoch_start_slot(epoch: Epoch) -> Slot: ``` ### `is_active_validator` + ```python def is_active_validator(validator: Validator, epoch: Epoch) -> bool: """ @@ -687,15 +690,14 @@ def is_active_validator(validator: Validator, epoch: Epoch) -> bool: ``` ### `is_slashable_validator` + ```python def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: """ Check if ``validator`` is 
slashable. """ - return ( - validator.activation_epoch <= epoch < validator.withdrawable_epoch and - validator.slashed is False - ) + return validator.slashed is False and (validator.activation_epoch <= epoch < validator.withdrawable_epoch) + ``` ### `get_active_validator_indices` @@ -772,12 +774,12 @@ def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: """ Return the number of committees in one epoch. """ - active_validators = get_active_validator_indices(state, epoch) + active_validator_indices = get_active_validator_indices(state, epoch) return max( 1, min( SHARD_COUNT // SLOTS_PER_EPOCH, - len(active_validators) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, + len(active_validator_indices) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, ) ) * SLOTS_PER_EPOCH ``` @@ -850,11 +852,11 @@ def get_crosslink_committees_at_slot(state: BeaconState, ] ``` -### `get_block_root` +### `get_block_root_at_slot` ```python -def get_block_root(state: BeaconState, - slot: Slot) -> Bytes32: +def get_block_root_at_slot(state: BeaconState, + slot: Slot) -> Bytes32: """ Return the block root at a recent ``slot``. """ @@ -862,19 +864,17 @@ def get_block_root(state: BeaconState, return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] ``` -`get_block_root(_, s)` should always return `signing_root` of the block in the beacon chain at slot `s`, and `get_crosslink_committees_at_slot(_, s)` should not change unless the [validator](#dfn-validator) registry changes. - -### `get_state_root` +### `get_block_root` ```python -def get_state_root(state: BeaconState, - slot: Slot) -> Bytes32: +def get_block_root(state: BeaconState, + epoch: Epoch) -> Bytes32: """ - Return the state root at a recent ``slot``. + Return the block root at a recent ``epoch``. """ - assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT - return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] + return get_block_root_at_slot(state, get_epoch_start_slot(epoch)) ``` + ### `get_randao_mix` ```python @@ -917,17 +917,17 @@ def generate_seed(state: BeaconState, ### `get_beacon_proposer_index` ```python -def get_beacon_proposer_index(state: BeaconState, slot: Slot=None) -> ValidatorIndex: +def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: """ Return the beacon proposer index at ``slot``. 
""" - epoch = slot_to_epoch(slot if slot != None else state.slot) - first_committee, _ = get_crosslink_committees_at_slot(state, slot if slot != None else state.slot)[0] + epoch = slot_to_epoch(state.slot) + first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0] + MAX_RANDOM_BYTE = 2**8 - 1 i = 0 while True: candidate_index = first_committee[(epoch + i) % len(first_committee)] random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32] - MAX_RANDOM_BYTE = 2**8 - 1 effective_balance = state.validator_registry[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_DEPOSIT_AMOUNT * random_byte: return candidate_index @@ -1449,7 +1449,7 @@ def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> List[P def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]: return [ a for a in get_matching_source_attestations(state, epoch) - if a.data.target_root == get_block_root(state, get_epoch_start_slot(epoch)) + if a.data.target_root == get_block_root(state, epoch) ] ``` @@ -1457,7 +1457,7 @@ def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[P def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]: return [ a for a in get_matching_source_attestations(state, epoch) - if a.data.beacon_block_root == get_block_root(state, a.data.slot) + if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot) ] ``` @@ -1484,7 +1484,7 @@ def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationDat ``` ```python -def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: +def get_winning_crosslink_and_attesting_indices(state: BeaconState, shard: Shard, epoch: Epoch) -> Tuple[Crosslink, List[ValidatorIndex]]: attestations = get_matching_source_attestations(state, epoch) shard_attestations = [a for a in attestations if a.data.shard == shard] shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations] @@ -1533,12 +1533,12 @@ def process_justification_and_finalization(state: BeaconState) -> None: previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, previous_epoch)) if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: state.current_justified_epoch = get_previous_epoch(state) - state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) + state.current_justified_root = get_block_root(state, state.current_justified_epoch) state.justification_bitfield |= (1 << 1) current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, current_epoch)) if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: state.current_justified_epoch = get_current_epoch(state) - state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) + state.current_justified_root = get_block_root(state, state.current_justified_epoch) state.justification_bitfield |= (1 << 0) # Process finalizations @@ -1546,19 +1546,19 @@ def process_justification_and_finalization(state: BeaconState) -> None: # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3: 
state.finalized_epoch = old_previous_justified_epoch - state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + state.finalized_root = get_block_root(state, state.finalized_epoch) # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2: state.finalized_epoch = old_previous_justified_epoch - state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + state.finalized_root = get_block_root(state, state.finalized_epoch) # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2: state.finalized_epoch = old_current_justified_epoch - state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + state.finalized_root = get_block_root(state, state.finalized_epoch) # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch == current_epoch - 1: state.finalized_epoch = old_current_justified_epoch - state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + state.finalized_root = get_block_root(state, state.finalized_epoch) ``` #### Crosslinks @@ -1573,7 +1573,7 @@ def process_crosslinks(state: BeaconState) -> None: for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): epoch = slot_to_epoch(slot) for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, shard, epoch) if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee): state.current_crosslinks[shard] = winning_crosslink ``` @@ -1595,40 +1595,40 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: previous_epoch = get_previous_epoch(state) total_balance = get_total_active_balance(state) - eligible_validator_indices = [ - index for index, validator in enumerate(state.validator_registry) - if is_active_validator(validator, previous_epoch) or (validator.slashed and previous_epoch < validator.withdrawable_epoch) - ] rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] - for index in eligible_validator_indices: - base_reward = get_base_reward(state, index) + eligible_validator_indices = [ + index for index, v in enumerate(state.validator_registry) + if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch) + ] - # Micro-incentives for attestations matching FFG source, FFG target, and head - for attestations in ( - get_matching_source_attestations(state, previous_epoch), - get_matching_target_attestations(state, previous_epoch), - get_matching_source_attestations(state, previous_epoch), - ): - if index in get_unslashed_attesting_indices(state, attestations): - rewards[index] += base_reward * get_attesting_balance(state, attestations) // total_balance + # Micro-incentives for matching FFG source, FFG target, and head + matching_source_attestations = get_matching_source_attestations(state, previous_epoch) + 
matching_target_attestations = get_matching_target_attestations(state, previous_epoch) + matching_head_attestations = get_matching_head_attestations(state, previous_epoch) + for attestations in (matching_source_attestations, matching_target_attestations, matching_head_attestations): + unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations) + attesting_balance = get_attesting_balance(state, attestations) + for index in eligible_validator_indices: + if index in unslashed_attesting_indices: + rewards[index] += get_base_reward(state, index) * attesting_balance // total_balance else: - penalties[index] += base_reward + penalties[index] += get_base_reward(state, index) - if index in get_unslashed_attesting_indices(state, get_matching_source_attestations(state, previous_epoch)): - earliest_attestation = get_earliest_attestation(state, get_matching_source_attestations(state, previous_epoch), index) - # Proposer micro-rewards - proposer_index = get_beacon_proposer_index(state, earliest_attestation.inclusion_slot) - rewards[proposer_index] += base_reward // PROPOSER_REWARD_QUOTIENT - # Inclusion delay micro-rewards - inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot - rewards[index] += base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay + # Proposer and inclusion delay micro-rewards + if index in get_unslashed_attesting_indices(state, matching_source_attestations): + earliest_attestation = get_earliest_attestation(state, matching_source_attestations, index) + rewards[earliest_attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT + inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot + rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay - # Inactivity penalty - finality_delay = previous_epoch - state.finalized_epoch - if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY: - penalties[index] += BASE_REWARDS_PER_EPOCH * base_reward - if index not in get_unslashed_attesting_indices(state, get_matching_target_attestations(state, previous_epoch)): + # Inactivity penalty + finality_delay = previous_epoch - state.finalized_epoch + if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY: + matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations) + for index in eligible_validator_indices: + penalties[index] += BASE_REWARDS_PER_EPOCH * get_base_reward(state, index) + if index not in matching_target_attesting_indices: penalties[index] += state.validator_registry[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT return [rewards, penalties] @@ -1641,7 +1641,7 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state))): epoch = slot_to_epoch(slot) for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, epoch, shard) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, shard, epoch) attesting_balance = get_total_balance(state, attesting_indices) committee_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: @@ -1915,7 +1915,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: pending_attestation = 
PendingAttestation( data=data, aggregation_bitfield=attestation.aggregation_bitfield, - inclusion_slot=state.slot + inclusion_slot=state.slot, + proposer_index=get_beacon_proposer_index(state), ) if target_epoch == get_current_epoch(state): state.current_epoch_attestations.append(pending_attestation) From 5587c44abe725afccd08fdad1667a03c0ada87f2 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 14:29:35 +1000 Subject: [PATCH 454/481] Update test_libs/pyspec/tests/test_sanity.py Co-Authored-By: JustinDrake --- test_libs/pyspec/tests/test_sanity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index f87a7c808..101bcc89c 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -436,7 +436,7 @@ def test_balance_driven_status_transitions(state): assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH # set validator balance to below ejection threshold - pre_state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE - 1 + pre_state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE post_state = deepcopy(pre_state) # From df64eeefa07eb58364fe1eb86eb351f63bf32e86 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Wed, 24 Apr 2019 14:46:28 +1000 Subject: [PATCH 455/481] Start fixing tests --- specs/core/0_beacon-chain.md | 8 ++++---- .../tests/block_processing/test_process_transfer.py | 10 ++++------ test_libs/pyspec/tests/helpers.py | 1 - 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 033e3b2cb..159d2d839 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -931,7 +931,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: candidate_index = first_committee[(current_epoch + i) % len(first_committee)] random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32] effective_balance = state.validator_registry[candidate_index].effective_balance - if effective_balance * MAX_RANDOM_BYTE >= MAX_DEPOSIT_AMOUNT * random_byte: + if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: return candidate_index i += 1 ``` @@ -1291,7 +1291,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Process genesis activations for index, validator in enumerate(state.validator_registry): - if validator.effective_balance >= MAX_DEPOSIT_AMOUNT: + if validator.effective_balance >= MAX_EFFECTIVE_BALANCE: validator.activation_eligibility_epoch = GENESIS_EPOCH validator.activation_epoch = GENESIS_EPOCH @@ -1662,7 +1662,7 @@ Run the following function: def process_registry_updates(state: BeaconState) -> None: # Process activation eligibility and ejections for index, validator in enumerate(state.validator_registry): - if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance >= MAX_DEPOSIT_AMOUNT: + if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance >= MAX_EFFECTIVE_BALANCE: validator.activation_eligibility_epoch = get_current_epoch(state) if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE: @@ -1717,7 +1717,7 @@ def process_final_updates(state: BeaconState) -> None: state.eth1_data_votes = [] # Update effective balances with hysteresis for index, validator in 
enumerate(state.validator_registry): - balance = min(state.balances[index], MAX_DEPOSIT_AMOUNT) + balance = min(state.balances[index], MAX_EFFECTIVE_BALANCE) HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2 if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance: validator.effective_balance = balance - balance % EFFECTIVE_BALANCE_INCREMENT diff --git a/test_libs/pyspec/tests/block_processing/test_process_transfer.py b/test_libs/pyspec/tests/block_processing/test_process_transfer.py index 71d0894bd..65df822de 100644 --- a/test_libs/pyspec/tests/block_processing/test_process_transfer.py +++ b/test_libs/pyspec/tests/block_processing/test_process_transfer.py @@ -5,11 +5,9 @@ import eth2spec.phase0.spec as spec from eth2spec.phase0.spec import ( get_active_validator_indices, - get_balance, get_beacon_proposer_index, get_current_epoch, process_transfer, - set_balance, ) from tests.helpers import ( get_valid_transfer, @@ -75,7 +73,7 @@ def test_success_withdrawable(state): def test_success_active_above_max_effective(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = spec.MAX_EFFECTIVE_BALANCE // 32 - set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE + amount) + state.validator_registry[sender_index] = spec.MAX_EFFECTIVE_BALANCE + amount transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0) pre_state, post_state = run_transfer_processing(state, transfer) @@ -86,7 +84,7 @@ def test_success_active_above_max_effective(state): def test_active_but_transfer_past_effective_balance(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = spec.MAX_EFFECTIVE_BALANCE // 32 - set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE) + state.validator_registry[sender_index] = spec.MAX_EFFECTIVE_BALANCE transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0) pre_state, post_state = run_transfer_processing(state, transfer, False) @@ -107,7 +105,7 @@ def test_incorrect_slot(state): def test_insufficient_balance(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = spec.MAX_EFFECTIVE_BALANCE - set_balance(state, sender_index, spec.MAX_EFFECTIVE_BALANCE) + state.validator_registry[sender_index] = spec.MAX_EFFECTIVE_BALANCE transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount + 1, fee=0) # un-activate so validator can transfer @@ -140,4 +138,4 @@ def test_invalid_pubkey(state): pre_state, post_state = run_transfer_processing(state, transfer, False) - return pre_state, transfer, post_state \ No newline at end of file + return pre_state, transfer, post_state diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 162c6fa62..465825c35 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -26,7 +26,6 @@ from eth2spec.phase0.spec import ( # functions convert_to_indexed, get_active_validator_indices, - get_balance, get_attesting_indices, get_block_root, get_crosslink_committees_at_slot, From 55f042aa71c8a6a3f62e803077ecb77f33a3b432 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Wed, 24 Apr 2019 15:17:25 +1000 Subject: [PATCH 456/481] More fixes --- specs/core/0_beacon-chain.md | 4 ++-- test_libs/pyspec/tests/helpers.py | 9 ++++++--- test_libs/pyspec/tests/test_sanity.py | 16 ++++++++++++---- tests/phase0/helpers.py | 3 ++- 4 files changed, 22 insertions(+), 10 deletions(-) 
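The effective-balance hysteresis applied by `process_final_updates` in the hunk above is easiest to see with concrete numbers. The sketch below is illustrative only, not spec text: the increment and cap values (`10**9` Gwei and 32 ETH) are assumed, and only the update condition itself is taken from the hunk.

```python
# Standalone sketch of the effective-balance hysteresis rule, with assumed
# constants; only the update condition mirrors the process_final_updates hunk.
EFFECTIVE_BALANCE_INCREMENT = 10**9
MAX_EFFECTIVE_BALANCE = 32 * 10**9
HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2


def updated_effective_balance(effective_balance: int, actual_balance: int) -> int:
    # Only move the effective balance when the actual balance drops below it,
    # or climbs more than 1.5 increments above it; then round down to a whole
    # increment. Small oscillations never touch the effective balance.
    balance = min(actual_balance, MAX_EFFECTIVE_BALANCE)
    if balance < effective_balance or effective_balance + 3 * HALF_INCREMENT < balance:
        return balance - balance % EFFECTIVE_BALANCE_INCREMENT
    return effective_balance


assert updated_effective_balance(30 * 10**9, 30_400_000_000) == 30 * 10**9  # small gain: unchanged
assert updated_effective_balance(30 * 10**9, 29_900_000_000) == 29 * 10**9  # any dip below: rounded down
assert updated_effective_balance(30 * 10**9, 31_600_000_000) == 31 * 10**9  # > 1.5 increments above: bumped up
```

Because an upward move needs more than 1.5 increments of head-room while any dip below triggers a downward move, a balance that merely oscillates around an increment boundary settles after a single adjustment instead of flipping every epoch.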
diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 159d2d839..3dd4c944c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -929,7 +929,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: i = 0 while True: candidate_index = first_committee[(current_epoch + i) % len(first_committee)] - random_byte = hash(generate_seed(state, epoch) + int_to_bytes8(i // 32))[i % 32] + random_byte = hash(generate_seed(state, current_epoch) + int_to_bytes8(i // 32))[i % 32] effective_balance = state.validator_registry[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: return candidate_index @@ -2011,7 +2011,7 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: assert ( state.validator_registry[transfer.sender].activation_eligibility_epoch == FAR_FUTURE_EPOCH or get_current_epoch(state) >= state.validator_registry[transfer.sender].withdrawable_epoch or - transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= get_balance(state, transfer.sender) + transfer.amount + transfer.fee + MAX_EFFECTIVE_BALANCE <= state.balances[transfer.sender] ) # Verify that the pubkey is valid assert ( diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index 465825c35..cf3d2624a 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -28,6 +28,7 @@ from eth2spec.phase0.spec import ( get_active_validator_indices, get_attesting_indices, get_block_root, + get_block_root_at_slot, get_crosslink_committees_at_slot, get_current_epoch, get_domain, @@ -51,9 +52,11 @@ privkeys = [i + 1 for i in range(1000)] pubkeys = [bls.privtopub(privkey) for privkey in privkeys] pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} + def get_balance(state, index): return state.balances[index] + def set_bitfield_bit(bitfield, i): """ Set the bit in ``bitfield`` at position ``i`` to ``1``. 
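The `get_beacon_proposer_index` hunk earlier in this patch keeps the balance-weighted rejection sampling while reading the seed from the current epoch. The sketch below is a toy version of that loop, not spec code: the committee, balances and seed are made up, `hashlib.sha256` stands in for the spec's `hash`, and the candidate iteration is simplified — only the acceptance test mirrors the spec.

```python
# Toy illustration of balance-weighted proposer selection (not spec code).
import hashlib

MAX_EFFECTIVE_BALANCE = 32 * 10**9  # Gwei, assumed
MAX_RANDOM_BYTE = 2**8 - 1


def sample_proposer(committee, effective_balances, seed: bytes) -> int:
    i = 0
    while True:
        candidate = committee[i % len(committee)]
        # One pseudo-random byte per trial, 32 trials per hash output.
        random_byte = hashlib.sha256(seed + (i // 32).to_bytes(8, 'little')).digest()[i % 32]
        # Acceptance probability is proportional to effective balance:
        # a 32 ETH validator always passes, a 16 ETH validator roughly half the time.
        if effective_balances[candidate] * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            return candidate
        i += 1


committee = [11, 12, 13, 14]
balances = {11: 16 * 10**9, 12: 32 * 10**9, 13: 24 * 10**9, 14: 32 * 10**9}
print(sample_proposer(committee, balances, b'example seed'))
```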
@@ -151,16 +154,16 @@ def build_attestation_data(state, slot, shard): if slot == state.slot: block_root = build_empty_block_for_next_slot(state).previous_block_root else: - block_root = get_block_root(state, slot) + block_root = get_block_root_at_slot(state, slot) current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) if slot < current_epoch_start_slot: print(slot) - epoch_boundary_root = get_block_root(state, get_epoch_start_slot(get_previous_epoch(state))) + epoch_boundary_root = get_block_root(state, get_previous_epoch(state)) elif slot == current_epoch_start_slot: epoch_boundary_root = block_root else: - epoch_boundary_root = get_block_root(state, current_epoch_start_slot) + epoch_boundary_root = get_block_root(state, get_current_epoch(state)) if slot < current_epoch_start_slot: justified_epoch = state.previous_justified_epoch diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index 182c9cfe0..b54b5f75a 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -9,6 +9,7 @@ from eth2spec.utils.minimal_ssz import signing_root from eth2spec.phase0.spec import ( # constants ZERO_HASH, + SLOTS_PER_HISTORICAL_ROOT, # SSZ Deposit, Transfer, @@ -17,9 +18,9 @@ from eth2spec.phase0.spec import ( get_active_validator_indices, get_beacon_proposer_index, get_block_root, + get_block_root_at_slot, get_current_epoch, get_domain, - get_state_root, advance_slot, cache_state, slot_to_epoch, @@ -48,6 +49,13 @@ from .helpers import ( ) +def get_state_root(state, slot) -> bytes: + """ + Return the state root at a recent ``slot``. + """ + assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT + return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] + # mark entire file as 'sanity' pytestmark = pytest.mark.sanity @@ -94,7 +102,7 @@ def test_empty_block_transition(state): state_transition(test_state, block) assert len(test_state.eth1_data_votes) == len(state.eth1_data_votes) + 1 - assert get_block_root(test_state, state.slot) == block.previous_block_root + assert get_block_root_at_slot(test_state, state.slot) == block.previous_block_root return state, [block], test_state @@ -108,7 +116,7 @@ def test_skipped_slots(state): assert test_state.slot == block.slot for slot in range(state.slot, test_state.slot): - assert get_block_root(test_state, slot) == block.previous_block_root + assert get_block_root_at_slot(test_state, slot) == block.previous_block_root return state, [block], test_state @@ -122,7 +130,7 @@ def test_empty_epoch_transition(state): assert test_state.slot == block.slot for slot in range(state.slot, test_state.slot): - assert get_block_root(test_state, slot) == block.previous_block_root + assert get_block_root_at_slot(test_state, slot) == block.previous_block_root return state, [block], test_state diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index 20054b821..c042dec91 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -16,6 +16,7 @@ from build.phase0.spec import ( VoluntaryExit, # functions get_block_root, + get_block_root_at_slot, get_current_epoch, get_domain, get_empty_block, @@ -141,7 +142,7 @@ def build_attestation_data(state, slot, shard): if epoch_start_slot == slot: epoch_boundary_root = block_root else: - get_block_root(state, epoch_start_slot) + get_block_root(state, get_current_epoch(state)) if slot < epoch_start_slot: justified_block_root = state.previous_justified_root From c37789dc5d5085b535d8d71ae2dc608fdb2fdd7d Mon Sep 17 00:00:00 
2001 From: Justin Drake Date: Wed, 24 Apr 2019 15:27:47 +1000 Subject: [PATCH 457/481] Tests fixed --- specs/core/0_beacon-chain.md | 4 ++-- .../pyspec/tests/block_processing/test_process_transfer.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 3dd4c944c..295064f77 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -921,9 +921,9 @@ def generate_seed(state: BeaconState, ```python def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: """ - Return the beacon proposer index at ``slot``. + Return the beacon proposer index at ``state.slot``. """ - current_epoch = slot_to_epoch(state.slot) + current_epoch = get_current_epoch(state) first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0] MAX_RANDOM_BYTE = 2**8 - 1 i = 0 diff --git a/test_libs/pyspec/tests/block_processing/test_process_transfer.py b/test_libs/pyspec/tests/block_processing/test_process_transfer.py index 65df822de..0eeaa7792 100644 --- a/test_libs/pyspec/tests/block_processing/test_process_transfer.py +++ b/test_libs/pyspec/tests/block_processing/test_process_transfer.py @@ -73,7 +73,7 @@ def test_success_withdrawable(state): def test_success_active_above_max_effective(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = spec.MAX_EFFECTIVE_BALANCE // 32 - state.validator_registry[sender_index] = spec.MAX_EFFECTIVE_BALANCE + amount + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + amount transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0) pre_state, post_state = run_transfer_processing(state, transfer) @@ -84,7 +84,7 @@ def test_success_active_above_max_effective(state): def test_active_but_transfer_past_effective_balance(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = spec.MAX_EFFECTIVE_BALANCE // 32 - state.validator_registry[sender_index] = spec.MAX_EFFECTIVE_BALANCE + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0) pre_state, post_state = run_transfer_processing(state, transfer, False) @@ -105,7 +105,7 @@ def test_incorrect_slot(state): def test_insufficient_balance(state): sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1] amount = spec.MAX_EFFECTIVE_BALANCE - state.validator_registry[sender_index] = spec.MAX_EFFECTIVE_BALANCE + state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount + 1, fee=0) # un-activate so validator can transfer From b361fdb385e1fd4d025f1e342e2f9d303f4bf642 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Wed, 24 Apr 2019 15:29:46 +1000 Subject: [PATCH 458/481] bug --- specs/core/0_beacon-chain.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 295064f77..c5f13710b 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1603,11 +1603,12 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: penalties[index] += get_base_reward(state, index) # Proposer and inclusion delay micro-rewards - if index in get_unslashed_attesting_indices(state, matching_source_attestations): - earliest_attestation = get_earliest_attestation(state, matching_source_attestations, index) - 
rewards[earliest_attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT - inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot - rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay + for index in eligible_validator_indices: + if index in get_unslashed_attesting_indices(state, matching_source_attestations): + earliest_attestation = get_earliest_attestation(state, matching_source_attestations, index) + rewards[earliest_attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT + inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot + rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay # Inactivity penalty finality_delay = previous_epoch - state.finalized_epoch From 4734b2288348fb9e9a2b1cd803872f4e61a8be42 Mon Sep 17 00:00:00 2001 From: Justin Drake Date: Wed, 24 Apr 2019 15:32:43 +1000 Subject: [PATCH 459/481] simplify --- specs/core/0_beacon-chain.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index c5f13710b..4a9dcc26c 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1603,12 +1603,11 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: penalties[index] += get_base_reward(state, index) # Proposer and inclusion delay micro-rewards - for index in eligible_validator_indices: - if index in get_unslashed_attesting_indices(state, matching_source_attestations): - earliest_attestation = get_earliest_attestation(state, matching_source_attestations, index) - rewards[earliest_attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT - inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot - rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay + for index in get_unslashed_attesting_indices(state, matching_source_attestations): + earliest_attestation = get_earliest_attestation(state, matching_source_attestations, index) + rewards[earliest_attestation.proposer_index] += get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT + inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot + rewards[index] += get_base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay # Inactivity penalty finality_delay = previous_epoch - state.finalized_epoch From 1284b93416cebb0fd4b58059ed72aa7332a78814 Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 24 Apr 2019 15:53:28 +1000 Subject: [PATCH 460/481] Update simple-serialize.md --- specs/simple-serialize.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 3f02335db..882147f0f 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -55,7 +55,7 @@ For convenience we alias: We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`. -> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, `is_fixed_size`, `is_variable_size`, etc.) objects implicitly carry their type. +> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, `is_variable_size`, etc.) 
objects implicitly carry their type. ### `"uintN"` @@ -75,8 +75,8 @@ return b"\x01" if value is True else b"\x00" ```python # Reccursively serialize -fixed_parts = [serialize(element) if is_fixed_size(element) else None for element in value] -variable_parts = [serialize(element) if is_variable_size(element) else "" for element in value] +fixed_parts = [serialize(element) if not is_variable_size(element) else None for element in value] +variable_parts = [serialize(element) if is_variable_size(element) else b"" for element in value] # Compute and check lengths fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part in fixed_parts] @@ -88,7 +88,7 @@ variable_offsets = [serialize(sum(fixed_lengths + variable_lengths[:i])) for i i fixed_parts = [part if part != None else variable_offsets[i] for i, part in enumerate(fixed_parts)] # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts -return "".join(fixed_parts + variable_parts) +return b"".join(fixed_parts + variable_parts) ``` ## Deserialization @@ -112,7 +112,7 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi ## Self-signed containers -Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signed_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`. +Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`. ## Implementations From 7f5cffb2863eba1cae815e59f00d87e5b14c062d Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 22 Apr 2019 17:46:13 +1000 Subject: [PATCH 461/481] pytests use configuration system now, add command option to conftest to switch, also fix minor testing bug --- Makefile | 4 +- test_libs/pyspec/README.md | 4 +- test_libs/pyspec/requirements-testing.txt | 3 ++ test_libs/pyspec/requirements.txt | 1 - test_libs/pyspec/tests/README.md | 0 test_libs/pyspec/tests/conftest.py | 52 ++++------------------- test_libs/pyspec/tests/helpers.py | 1 + 7 files changed, 17 insertions(+), 48 deletions(-) create mode 100644 test_libs/pyspec/requirements-testing.txt delete mode 100644 test_libs/pyspec/tests/README.md diff --git a/Makefile b/Makefile index 6586646fd..d00817daf 100644 --- a/Makefile +++ b/Makefile @@ -34,10 +34,10 @@ install_test: cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; test: $(PY_SPEC_ALL_TARGETS) - cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest -m minimal_config . + cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest . citest: $(PY_SPEC_ALL_TARGETS) - cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml -m minimal_config . + cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml . # "make pyspec" to create the pyspec for all phases. 
pyspec: $(PY_SPEC_ALL_TARGETS) diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index b9bf86220..df1834210 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -38,7 +38,7 @@ Install dependencies: ```bash python3 -m venv venv . venv/bin/activate -pip3 install -r requirements.txt +pip3 install -r requirements-testing.txt ``` Note: make sure to run `make -B pyspec` from the root of the specs repository, to build the parts of the pyspec module derived from the markdown specs. @@ -46,7 +46,7 @@ The `-B` flag may be helpful to force-overwrite the `pyspec` output after you ma Run the tests: ``` -pytest -m minimal_config . +pytest --config=minimal ``` diff --git a/test_libs/pyspec/requirements-testing.txt b/test_libs/pyspec/requirements-testing.txt new file mode 100644 index 000000000..388a878a9 --- /dev/null +++ b/test_libs/pyspec/requirements-testing.txt @@ -0,0 +1,3 @@ +-r requirements.txt +pytest>=3.6,<3.7 +../config_helpers diff --git a/test_libs/pyspec/requirements.txt b/test_libs/pyspec/requirements.txt index 3296ef807..78d41708d 100644 --- a/test_libs/pyspec/requirements.txt +++ b/test_libs/pyspec/requirements.txt @@ -2,4 +2,3 @@ eth-utils>=1.3.0,<2 eth-typing>=2.1.0,<3.0.0 pycryptodome==3.7.3 py_ecc>=1.6.0 -pytest>=3.6,<3.7 diff --git a/test_libs/pyspec/tests/README.md b/test_libs/pyspec/tests/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/test_libs/pyspec/tests/conftest.py b/test_libs/pyspec/tests/conftest.py index bf9b1009b..f6c70d280 100644 --- a/test_libs/pyspec/tests/conftest.py +++ b/test_libs/pyspec/tests/conftest.py @@ -1,58 +1,24 @@ import pytest from eth2spec.phase0 import spec +from preset_loader import loader from .helpers import ( create_genesis_state, ) -DEFAULT_CONFIG = {} # no change - -MINIMAL_CONFIG = { - "SHARD_COUNT": 8, - "MIN_ATTESTATION_INCLUSION_DELAY": 2, - "TARGET_COMMITTEE_SIZE": 4, - "SLOTS_PER_EPOCH": 8, - "SLOTS_PER_HISTORICAL_ROOT": 64, - "LATEST_RANDAO_MIXES_LENGTH": 64, - "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, - "LATEST_SLASHED_EXIT_LENGTH": 64, -} - - -def overwrite_spec_config(config): - for field in config: - setattr(spec, field, config[field]) - if field == "LATEST_RANDAO_MIXES_LENGTH": - spec.BeaconState.fields['latest_randao_mixes'][1] = config[field] - elif field == "SHARD_COUNT": - spec.BeaconState.fields['current_crosslinks'][1] = config[field] - spec.BeaconState.fields['previous_crosslinks'][1] = config[field] - elif field == "SLOTS_PER_HISTORICAL_ROOT": - spec.BeaconState.fields['latest_block_roots'][1] = config[field] - spec.BeaconState.fields['latest_state_roots'][1] = config[field] - spec.HistoricalBatch.fields['block_roots'][1] = config[field] - spec.HistoricalBatch.fields['state_roots'][1] = config[field] - elif field == "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": - spec.BeaconState.fields['latest_active_index_roots'][1] = config[field] - elif field == "LATEST_SLASHED_EXIT_LENGTH": - spec.BeaconState.fields['latest_slashed_balances'][1] = config[field] - - -@pytest.fixture( - params=[ - pytest.param(MINIMAL_CONFIG, marks=pytest.mark.minimal_config), - DEFAULT_CONFIG, - ] -) -def config(request): - return request.param +def pytest_addoption(parser): + parser.addoption( + "--config", action="store", default="minimal", help="config: make the pyspec use the specified configuration" + ) @pytest.fixture(autouse=True) -def overwrite_config(config): - overwrite_spec_config(config) +def config(request): + config_name = request.config.getoption("--config") + presets = 
loader.load_presets('../../configs/', config_name) + spec.apply_constants_preset(presets) @pytest.fixture diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index d181dadfe..a728a6acc 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -118,6 +118,7 @@ def create_genesis_state(num_validators, deposit_data_leaves=None): def build_empty_block_for_next_slot(state): empty_block = BeaconBlock() empty_block.slot = state.slot + 1 + empty_block.body.eth1_data.deposit_count = state.deposit_index previous_block_header = deepcopy(state.latest_block_header) if previous_block_header.state_root == spec.ZERO_HASH: previous_block_header.state_root = state.hash_tree_root() From aaafe92c5f3a59fd4b902b6ef89138daac760430 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 22 Apr 2019 17:54:58 +1000 Subject: [PATCH 462/481] update makefile to install requirements for tests correctly --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d00817daf..73d8adea8 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS) # installs the packages to run pyspec tests install_test: - cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; + cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements-testing.txt; test: $(PY_SPEC_ALL_TARGETS) cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest . From 02e6d5b46ca74678567bb1f43cb4d25def9c8371 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 22 Apr 2019 17:59:01 +1000 Subject: [PATCH 463/481] make circle CI aware of the testing requirments file --- .circleci/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9a7172866..4f806b00f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -61,13 +61,13 @@ jobs: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_cached_venv: venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}' + reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' - run: name: Install pyspec requirements command: make install_test - save_cached_venv: venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}' + reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' venv_path: ./test_libs/pyspec/venv test: docker: @@ -78,7 +78,7 @@ jobs: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_cached_venv: venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}' + reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' - run: name: Run py-tests command: make citest From 97906a633910b8989576dc7371f4ca438d435cdf Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 22 Apr 2019 18:07:55 +0800 Subject: [PATCH 464/481] Only use `setup.py` --- .circleci/config.yml | 6 ++--- Makefile | 2 +- test_generators/README.md | 2 +- test_libs/config_helpers/requirements.txt | 1 - test_libs/config_helpers/setup.py | 15 +++++++++--- test_libs/gen_helpers/requirements.txt | 2 -- test_libs/gen_helpers/setup.py | 18 ++++++++++---- test_libs/pyspec/README.md | 2 +- test_libs/pyspec/requirements-testing.txt | 3 --- 
test_libs/pyspec/requirements.txt | 4 ---- test_libs/pyspec/setup.py | 29 ++++++++++++++++++----- 11 files changed, 55 insertions(+), 29 deletions(-) delete mode 100644 test_libs/config_helpers/requirements.txt delete mode 100644 test_libs/gen_helpers/requirements.txt delete mode 100644 test_libs/pyspec/requirements-testing.txt delete mode 100644 test_libs/pyspec/requirements.txt diff --git a/.circleci/config.yml b/.circleci/config.yml index 4f806b00f..28d949de4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -61,13 +61,13 @@ jobs: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_cached_venv: venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' + reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}' - run: name: Install pyspec requirements command: make install_test - save_cached_venv: venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' + reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}' venv_path: ./test_libs/pyspec/venv test: docker: @@ -78,7 +78,7 @@ jobs: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_cached_venv: venv_name: v1-pyspec - reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}' + reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}' - run: name: Run py-tests command: make citest diff --git a/Makefile b/Makefile index 73d8adea8..10550750a 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS) # installs the packages to run pyspec tests install_test: - cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements-testing.txt; + cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -e .[dev]; test: $(PY_SPEC_ALL_TARGETS) cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest . 
diff --git a/test_generators/README.md b/test_generators/README.md index 66534e5a8..94db105c0 100644 --- a/test_generators/README.md +++ b/test_generators/README.md @@ -72,7 +72,7 @@ Note: make sure to run `make pyspec` from the root of the specs repository, to b Install all the necessary requirements (re-run when you add more): ```bash -pip3 install -r requirements.txt +pip3 install -e .[pyspec] ``` And write your initial test generator, extending the base generator: diff --git a/test_libs/config_helpers/requirements.txt b/test_libs/config_helpers/requirements.txt deleted file mode 100644 index e441a474b..000000000 --- a/test_libs/config_helpers/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -ruamel.yaml==0.15.87 diff --git a/test_libs/config_helpers/setup.py b/test_libs/config_helpers/setup.py index 90ad94ee4..b69d19cdb 100644 --- a/test_libs/config_helpers/setup.py +++ b/test_libs/config_helpers/setup.py @@ -1,9 +1,18 @@ from distutils.core import setup + +deps = { + 'config_helpers': [ + "ruamel.yaml==0.15.87", + ], +} + +deps['dev'] = ( + deps['config_helpers'] +) + setup( name='config_helpers', packages=['preset_loader'], - install_requires=[ - "ruamel.yaml==0.15.87" - ] + install_requires=deps['config_helpers'] ) diff --git a/test_libs/gen_helpers/requirements.txt b/test_libs/gen_helpers/requirements.txt deleted file mode 100644 index 3d6a39458..000000000 --- a/test_libs/gen_helpers/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -ruamel.yaml==0.15.87 -eth-utils==1.4.1 diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py index 5de27a6db..d4d331c06 100644 --- a/test_libs/gen_helpers/setup.py +++ b/test_libs/gen_helpers/setup.py @@ -1,10 +1,20 @@ from distutils.core import setup + +deps = { + 'gen_helpers': [ + "ruamel.yaml==0.15.87", + "eth-utils==1.4.1", + ], +} + +deps['dev'] = ( + deps['gen_helpers'] +) + + setup( name='gen_helpers', packages=['gen_base'], - install_requires=[ - "ruamel.yaml==0.15.87", - "eth-utils==1.4.1" - ] + install_requires=deps['gen_helpers'], ) diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md index df1834210..ab2967024 100644 --- a/test_libs/pyspec/README.md +++ b/test_libs/pyspec/README.md @@ -38,7 +38,7 @@ Install dependencies: ```bash python3 -m venv venv . venv/bin/activate -pip3 install -r requirements-testing.txt +pip3 install -e .[dev] ``` Note: make sure to run `make -B pyspec` from the root of the specs repository, to build the parts of the pyspec module derived from the markdown specs. 
diff --git a/test_libs/pyspec/requirements-testing.txt b/test_libs/pyspec/requirements-testing.txt deleted file mode 100644 index 388a878a9..000000000 --- a/test_libs/pyspec/requirements-testing.txt +++ /dev/null @@ -1,3 +0,0 @@ --r requirements.txt -pytest>=3.6,<3.7 -../config_helpers diff --git a/test_libs/pyspec/requirements.txt b/test_libs/pyspec/requirements.txt deleted file mode 100644 index 78d41708d..000000000 --- a/test_libs/pyspec/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -eth-utils>=1.3.0,<2 -eth-typing>=2.1.0,<3.0.0 -pycryptodome==3.7.3 -py_ecc>=1.6.0 diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index 1a131a417..13ea4d6e5 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -1,13 +1,30 @@ from setuptools import setup, find_packages -setup( - name='pyspec', - packages=find_packages(), - tests_require=["pytest"], - install_requires=[ + + + +deps = { + 'pyspec': [ "eth-utils>=1.3.0,<2", "eth-typing>=2.1.0,<3.0.0", "pycryptodome==3.7.3", "py_ecc>=1.6.0", - ] + ], + 'test': [ + "pytest>=3.6,<3.7", + ], +} + +deps['dev'] = ( + deps['pyspec'] + + deps['test'] +) + +install_requires = deps['pyspec'] + +setup( + name='pyspec', + packages=find_packages(), + install_requires=install_requires, + extras_require=deps, ) From 0fc31e3e7d2404567fd1d6e617ebcfa8e89e6dec Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 23 Apr 2019 09:36:18 +0800 Subject: [PATCH 465/481] Update .gitignore --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 3dd86fc80..0419f51b4 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,10 @@ venv .venvs .venv /.pytest_cache +*.egg +*.egg-info +eggs +.eggs build/ output/ From ba99f8a284709a7bb648dbde59c22d4a9eb5d6e5 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 23 Apr 2019 10:12:13 +0800 Subject: [PATCH 466/481] Update `install_requires` --- test_libs/config_helpers/setup.py | 8 +++++--- test_libs/gen_helpers/setup.py | 7 ++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/test_libs/config_helpers/setup.py b/test_libs/config_helpers/setup.py index b69d19cdb..836993d36 100644 --- a/test_libs/config_helpers/setup.py +++ b/test_libs/config_helpers/setup.py @@ -2,17 +2,19 @@ from distutils.core import setup deps = { - 'config_helpers': [ + 'preset_loader': [ "ruamel.yaml==0.15.87", ], } deps['dev'] = ( - deps['config_helpers'] + deps['preset_loader'] ) +install_requires = deps['preset_loader'] + setup( name='config_helpers', packages=['preset_loader'], - install_requires=deps['config_helpers'] + install_requires=install_requires, ) diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py index d4d331c06..6d4003573 100644 --- a/test_libs/gen_helpers/setup.py +++ b/test_libs/gen_helpers/setup.py @@ -2,19 +2,20 @@ from distutils.core import setup deps = { - 'gen_helpers': [ + 'gen_base': [ "ruamel.yaml==0.15.87", "eth-utils==1.4.1", ], } deps['dev'] = ( - deps['gen_helpers'] + deps['gen_base'] ) +install_requires = deps['gen_base'] setup( name='gen_helpers', packages=['gen_base'], - install_requires=deps['gen_helpers'], + install_requires=install_requires, ) From 5437273e23c4380b33cf94aaae065fbb5686d2a7 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 23 Apr 2019 10:15:01 +0800 Subject: [PATCH 467/481] Update `packages` --- .gitignore | 3 +++ test_libs/config_helpers/setup.py | 4 ++-- test_libs/gen_helpers/setup.py | 4 ++-- test_libs/pyspec/setup.py | 4 +--- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git 
a/.gitignore b/.gitignore index 0419f51b4..84938d298 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,6 @@ eth2.0-spec-tests/ # Dynamically built from Markdown spec test_libs/pyspec/eth2spec/phase0/spec.py + +# vscode +.vscode/** diff --git a/test_libs/config_helpers/setup.py b/test_libs/config_helpers/setup.py index 836993d36..88669c092 100644 --- a/test_libs/config_helpers/setup.py +++ b/test_libs/config_helpers/setup.py @@ -1,4 +1,4 @@ -from distutils.core import setup +from setuptools import setup, find_packages deps = { @@ -15,6 +15,6 @@ install_requires = deps['preset_loader'] setup( name='config_helpers', - packages=['preset_loader'], + packages=find_packages(exclude=["tests", "tests.*"]), install_requires=install_requires, ) diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py index 6d4003573..0c84ff2e2 100644 --- a/test_libs/gen_helpers/setup.py +++ b/test_libs/gen_helpers/setup.py @@ -1,4 +1,4 @@ -from distutils.core import setup +from setuptools import setup, find_packages deps = { @@ -16,6 +16,6 @@ install_requires = deps['gen_base'] setup( name='gen_helpers', - packages=['gen_base'], + packages=find_packages(exclude=["tests", "tests.*"]), install_requires=install_requires, ) diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py index 13ea4d6e5..3fd9d4c0f 100644 --- a/test_libs/pyspec/setup.py +++ b/test_libs/pyspec/setup.py @@ -1,8 +1,6 @@ from setuptools import setup, find_packages - - deps = { 'pyspec': [ "eth-utils>=1.3.0,<2", @@ -24,7 +22,7 @@ install_requires = deps['pyspec'] setup( name='pyspec', - packages=find_packages(), + packages=find_packages(exclude=["tests", "tests.*"]), install_requires=install_requires, extras_require=deps, ) From b1874dc18b6cd7cc65dd52ce4f9b9c8e9ad14f3c Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 23 Apr 2019 10:39:52 +0800 Subject: [PATCH 468/481] Update Makefile and CI setting 1. Move .venv to TEST_LIBS_DIR/ 2. 
Install `config_helpers` separately --- .circleci/config.yml | 8 ++++---- Makefile | 14 ++++++++++---- test_libs/setup.py | 29 +++++++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 8 deletions(-) create mode 100644 test_libs/setup.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 28d949de4..41e809207 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -60,15 +60,15 @@ jobs: - restore_cache: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_cached_venv: - venv_name: v1-pyspec + venv_name: v1-test_libs reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}' - run: name: Install pyspec requirements command: make install_test - save_cached_venv: - venv_name: v1-pyspec + venv_name: v1-test_libs reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}' - venv_path: ./test_libs/pyspec/venv + venv_path: ./test_libs/venv test: docker: - image: circleci/python:3.6 @@ -77,7 +77,7 @@ jobs: - restore_cache: key: v1-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_cached_venv: - venv_name: v1-pyspec + venv_name: v1-test_libs reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}' - run: name: Run py-tests diff --git a/Makefile b/Makefile index 10550750a..135124898 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ SPEC_DIR = ./specs SCRIPT_DIR = ./scripts TEST_LIBS_DIR = ./test_libs PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec +CONFIG_HELPERS_DIR = $(TEST_LIBS_DIR)/config_helpers YAML_TEST_DIR = ./eth2.0-spec-tests/tests GENERATOR_DIR = ./test_generators CONFIGS_DIR = ./configs @@ -23,7 +24,8 @@ all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS) clean: rm -rf $(YAML_TEST_DIR) rm -rf $(GENERATOR_VENVS) - rm -rf $(PY_SPEC_DIR)/venv $(PY_SPEC_DIR)/.pytest_cache + rm -rf $(TEST_LIBS_DIR)/venv + rm -rf $(PY_SPEC_DIR)/.pytest_cache rm -rf $(PY_SPEC_ALL_TARGETS) # "make gen_yaml_tests" to run generators @@ -31,13 +33,17 @@ gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS) # installs the packages to run pyspec tests install_test: - cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -e .[dev]; + cd $(TEST_LIBS_DIR); python3 -m venv venv; . venv/bin/activate; \ + cd ..; cd $(CONFIG_HELPERS_DIR); pip3 install -e .; \ + cd ../..; cd $(PY_SPEC_DIR); pip3 install -e .[dev]; test: $(PY_SPEC_ALL_TARGETS) - cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest . + cd $(TEST_LIBS_DIR); . venv/bin/activate; \ + cd ..; cd $(PY_SPEC_DIR); python -m pytest .; citest: $(PY_SPEC_ALL_TARGETS) - cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml . + cd $(TEST_LIBS_DIR); . venv/bin/activate; \ + cd ..; cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml . # "make pyspec" to create the pyspec for all phases. 
pyspec: $(PY_SPEC_ALL_TARGETS) diff --git a/test_libs/setup.py b/test_libs/setup.py new file mode 100644 index 000000000..b82fc369c --- /dev/null +++ b/test_libs/setup.py @@ -0,0 +1,29 @@ +from setuptools import setup, find_packages + + +deps = { + 'pyspec': [ + "eth-utils>=1.3.0,<2", + "eth-typing>=2.1.0,<3.0.0", + "pycryptodome==3.7.3", + "py_ecc>=1.6.0", + ], + 'test': [ + "pytest>=3.6,<3.7", + ], +} + +deps['dev'] = ( + deps['pyspec'] + + deps['test'] +) + +install_requires = deps['pyspec'] + + +setup( + name='pyspec', + packages=find_packages(exclude=["tests", "tests.*"]), + install_requires=install_requires, + extras_require=deps, +) From 6888d9f36b1e537684fc5cda4cfae35b7c1eacb0 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 24 Apr 2019 19:30:13 +1000 Subject: [PATCH 469/481] update config constants --- configs/constant_presets/mainnet.yaml | 9 ++++----- configs/constant_presets/minimal.yaml | 9 ++++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml index 8b9dade73..d63c617b7 100644 --- a/configs/constant_presets/mainnet.yaml +++ b/configs/constant_presets/mainnet.yaml @@ -10,7 +10,7 @@ SHARD_COUNT: 1024 # 2**7 (= 128) TARGET_COMMITTEE_SIZE: 128 # 2**12 (= 4,096) -MAX_ATTESTATION_PARTICIPANTS: 4096 +MAX_INDICES_PER_ATTESTATION: 4096 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) @@ -32,7 +32,7 @@ DEPOSIT_CONTRACT_TREE_DEPTH: 32 # 2**0 * 10**9 (= 1,000,000,000) Gwei MIN_DEPOSIT_AMOUNT: 1000000000 # 2**5 * 10**9 (= 32,000,000,000) Gwei -MAX_DEPOSIT_AMOUNT: 32000000000 +MAX_EFFECTIVE_BALANCE: 32000000000 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 # 2**0 * 10**9 (= 1,000,000,000) Gwei @@ -44,7 +44,6 @@ HIGH_BALANCE_INCREMENT: 1000000000 GENESIS_FORK_VERSION: 0x00000000 # 0, GENESIS_EPOCH is derived from this constant GENESIS_SLOT: 0 -GENESIS_START_SHARD: 0 # 2**64 - 1 FAR_FUTURE_EPOCH: 18446744073709551615 BLS_WITHDRAWAL_PREFIX_BYTE: 0x00 @@ -110,8 +109,8 @@ MAX_ATTESTATIONS: 128 MAX_DEPOSITS: 16 # 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 -# 2**4 (= 16) -MAX_TRANSFERS: 16 +# Originally 2**4 (= 16), disabled for now. +MAX_TRANSFERS: 0 # Signature domains diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml index edc447c45..711f6737d 100644 --- a/configs/constant_presets/minimal.yaml +++ b/configs/constant_presets/minimal.yaml @@ -10,7 +10,7 @@ SHARD_COUNT: 8 # [customized] unsecure, but fast TARGET_COMMITTEE_SIZE: 4 # 2**12 (= 4,096) -MAX_ATTESTATION_PARTICIPANTS: 4096 +MAX_INDICES_PER_ATTESTATION: 4096 # 2**2 (= 4) MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) @@ -32,7 +32,7 @@ DEPOSIT_CONTRACT_TREE_DEPTH: 32 # 2**0 * 10**9 (= 1,000,000,000) Gwei MIN_DEPOSIT_AMOUNT: 1000000000 # 2**5 * 10**9 (= 32,000,000,000) Gwei -MAX_DEPOSIT_AMOUNT: 32000000000 +MAX_EFFECTIVE_BALANCE: 32000000000 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 # 2**0 * 10**9 (= 1,000,000,000) Gwei @@ -44,7 +44,6 @@ HIGH_BALANCE_INCREMENT: 1000000000 GENESIS_FORK_VERSION: 0x00000000 # 0, GENESIS_EPOCH is derived from this constant GENESIS_SLOT: 0 -GENESIS_START_SHARD: 0 # 2**64 - 1 FAR_FUTURE_EPOCH: 18446744073709551615 BLS_WITHDRAWAL_PREFIX_BYTE: 0x00 @@ -110,8 +109,8 @@ MAX_ATTESTATIONS: 128 MAX_DEPOSITS: 16 # 2**4 (= 16) MAX_VOLUNTARY_EXITS: 16 -# 2**4 (= 16) -MAX_TRANSFERS: 16 +# Originally 2**4 (= 16), disabled for now. 
+MAX_TRANSFERS: 0 # Signature domains From 9c2fa02658d7020b7d858770d70070378f3856d2 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 24 Apr 2019 20:54:39 +1000 Subject: [PATCH 470/481] Update test_libs/pyspec/tests/test_sanity.py Co-Authored-By: JustinDrake --- test_libs/pyspec/tests/test_sanity.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index b54b5f75a..071f3f8b3 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -323,7 +323,6 @@ def test_attestation(state): assert len(test_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1 - proposer_index = get_beacon_proposer_index(test_state) # # Epoch transition should move to previous_epoch_attestations From b1e1510e213cabe4cec2322d7bf4f5334919d75a Mon Sep 17 00:00:00 2001 From: Justin Date: Wed, 24 Apr 2019 20:57:31 +1000 Subject: [PATCH 471/481] Update 0_beacon-chain.md --- specs/core/0_beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 4a9dcc26c..cf051b5e3 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1429,6 +1429,7 @@ def get_total_active_balance(state: BeaconState) -> Gwei: ```python def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]: + assert epoch in (get_current_epoch(state), get_previous_epoch(state)) return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations ``` @@ -1472,8 +1473,7 @@ def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationDat ```python def get_winning_crosslink_and_attesting_indices(state: BeaconState, shard: Shard, epoch: Epoch) -> Tuple[Crosslink, List[ValidatorIndex]]: - attestations = get_matching_source_attestations(state, epoch) - shard_attestations = [a for a in attestations if a.data.shard == shard] + shard_attestations = [a for a in get_matching_source_attestations(state, epoch) if a.data.shard == shard] shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations] candidate_crosslinks = [ c for c in shard_crosslinks From 20d65e040b4fc140da5441aba4540a8618a6a482 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 11:31:24 -0600 Subject: [PATCH 472/481] pr feedback --- specs/core/0_beacon-chain.md | 17 +++++++++++++++-- specs/validator/0_beacon-chain-validator.md | 4 ++-- test_libs/pyspec/tests/test_sanity.py | 11 +---------- tests/phase0/helpers.py | 2 +- 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index cf051b5e3..8eebc1618 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -68,6 +68,7 @@ - [`get_crosslink_committees_at_slot`](#get_crosslink_committees_at_slot) - [`get_block_root_at_slot`](#get_block_root_at_slot) - [`get_block_root`](#get_block_root) + - [`get_state_root`](#get_state_root) - [`get_randao_mix`](#get_randao_mix) - [`get_active_index_root`](#get_active_index_root) - [`generate_seed`](#generate_seed) @@ -877,6 +878,18 @@ def get_block_root(state: BeaconState, return get_block_root_at_slot(state, get_epoch_start_slot(epoch)) ``` +### `get_state_root` + +```python +def get_state_root(state: BeaconState, + slot: Slot) -> Bytes32: + """ + Return the state root at a recent ``slot``. 
+ """ + assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT + return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] +``` + ### `get_randao_mix` ```python @@ -1519,12 +1532,12 @@ def process_justification_and_finalization(state: BeaconState) -> None: state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 previous_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, previous_epoch)) if previous_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: - state.current_justified_epoch = get_previous_epoch(state) + state.current_justified_epoch = previous_epoch state.current_justified_root = get_block_root(state, state.current_justified_epoch) state.justification_bitfield |= (1 << 1) current_epoch_matching_target_balance = get_attesting_balance(state, get_matching_target_attestations(state, current_epoch)) if current_epoch_matching_target_balance * 3 >= get_total_active_balance(state) * 2: - state.current_justified_epoch = get_current_epoch(state) + state.current_justified_epoch = current_epoch state.current_justified_root = get_block_root(state, state.current_justified_epoch) state.justification_bitfield |= (1 << 0) diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md index 03d5b5f5b..8cc38eebf 100644 --- a/specs/validator/0_beacon-chain-validator.md +++ b/specs/validator/0_beacon-chain-validator.md @@ -138,7 +138,7 @@ A validator has two primary responsibilities to the beacon chain -- [proposing b ### Block proposal -A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function). +A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function). There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks). @@ -368,7 +368,7 @@ def get_committee_assignment( return assignment ``` -A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question and can not reliably be used to predict in advance. +A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question. Proposer selection is only stable within the context of the current epoch. 
```python def is_proposer_at_slot(state: BeaconState, diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index 071f3f8b3..d3035576b 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -9,7 +9,6 @@ from eth2spec.utils.minimal_ssz import signing_root from eth2spec.phase0.spec import ( # constants ZERO_HASH, - SLOTS_PER_HISTORICAL_ROOT, # SSZ Deposit, Transfer, @@ -17,13 +16,12 @@ from eth2spec.phase0.spec import ( # functions get_active_validator_indices, get_beacon_proposer_index, - get_block_root, get_block_root_at_slot, + get_state_root, get_current_epoch, get_domain, advance_slot, cache_state, - slot_to_epoch, verify_merkle_branch, hash, ) @@ -49,13 +47,6 @@ from .helpers import ( ) -def get_state_root(state, slot) -> bytes: - """ - Return the state root at a recent ``slot``. - """ - assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT - return state.latest_state_roots[slot % SLOTS_PER_HISTORICAL_ROOT] - # mark entire file as 'sanity' pytestmark = pytest.mark.sanity diff --git a/tests/phase0/helpers.py b/tests/phase0/helpers.py index c042dec91..18898dd3c 100644 --- a/tests/phase0/helpers.py +++ b/tests/phase0/helpers.py @@ -142,7 +142,7 @@ def build_attestation_data(state, slot, shard): if epoch_start_slot == slot: epoch_boundary_root = block_root else: - get_block_root(state, get_current_epoch(state)) + epoch_boundary_root = get_block_root(state, get_current_epoch(state)) if slot < epoch_start_slot: justified_block_root = state.previous_justified_root From 7f720133fab5248623333f08864deb1ac4b432b7 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 24 Apr 2019 11:38:26 -0600 Subject: [PATCH 473/481] Apply suggestions from code review Co-Authored-By: djrtwo --- test_libs/pyspec/tests/test_finality.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test_libs/pyspec/tests/test_finality.py b/test_libs/pyspec/tests/test_finality.py index 8c1b4e871..4aee1c538 100644 --- a/test_libs/pyspec/tests/test_finality.py +++ b/test_libs/pyspec/tests/test_finality.py @@ -77,7 +77,7 @@ def test_finality_from_genesis_rule_4(state): test_state = deepcopy(state) blocks = [] - for epoch in range(6): + for epoch in range(4): prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) blocks += new_blocks @@ -88,7 +88,7 @@ def test_finality_from_genesis_rule_4(state): elif epoch == 2: check_finality(test_state, prev_state, True, False, False) elif epoch >= 3: - # rule 4 of finaliy + # rule 4 of finality check_finality(test_state, prev_state, True, True, True) assert test_state.finalized_epoch == prev_state.current_justified_epoch assert test_state.finalized_root == prev_state.current_justified_root @@ -137,10 +137,10 @@ def test_finality_rule_2(state): if epoch == 0: prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) check_finality(test_state, prev_state, True, False, False) - if epoch == 1: + elif epoch == 1: prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False) check_finality(test_state, prev_state, False, True, False) - if epoch == 2: + elif epoch == 2: prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True) # finalized by rule 2 check_finality(test_state, prev_state, True, False, True) From 9e8a9a26fd787550710531a3b9045e2f18cd5860 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 11:44:03 -0600 Subject: [PATCH 474/481] PR 
feedback --- test_libs/pyspec/tests/test_finality.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/test_libs/pyspec/tests/test_finality.py b/test_libs/pyspec/tests/test_finality.py index 4aee1c538..9992d2a78 100644 --- a/test_libs/pyspec/tests/test_finality.py +++ b/test_libs/pyspec/tests/test_finality.py @@ -73,7 +73,7 @@ def next_epoch_with_attestations(state, return state, blocks, post_state -def test_finality_from_genesis_rule_4(state): +def test_finality_rule_4(state): test_state = deepcopy(state) blocks = [] @@ -81,8 +81,10 @@ def test_finality_from_genesis_rule_4(state): prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) blocks += new_blocks + # justification/finalization skipped at GENESIS_EPOCH if epoch == 0: check_finality(test_state, prev_state, False, False, False) + # justification/finalization skipped at GENESIS_EPOCH + 1 elif epoch == 1: check_finality(test_state, prev_state, False, False, False) elif epoch == 2: @@ -132,8 +134,6 @@ def test_finality_rule_2(state): blocks = [] for epoch in range(3): - old_previous_justified_epoch = test_state.previous_justified_epoch - old_previous_justified_root = test_state.previous_justified_root if epoch == 0: prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) check_finality(test_state, prev_state, True, False, False) @@ -144,8 +144,8 @@ def test_finality_rule_2(state): prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True) # finalized by rule 2 check_finality(test_state, prev_state, True, False, True) - assert test_state.finalized_epoch == old_previous_justified_epoch - assert test_state.finalized_root == old_previous_justified_root + assert test_state.finalized_epoch == prev_state.previous_justified_epoch + assert test_state.finalized_root == prev_state.previous_justified_root blocks += new_blocks @@ -170,19 +170,24 @@ def test_finality_rule_3(state): blocks += new_blocks check_finality(test_state, prev_state, True, False, False) + # In epoch N, JE is set to N, prev JE is set to N-1 prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False) blocks += new_blocks check_finality(test_state, prev_state, True, True, True) + # In epoch N+1, JE is N, prev JE is N-1, and not enough messages get in to do anything prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False) blocks += new_blocks check_finality(test_state, prev_state, False, True, False) + # In epoch N+2, JE is N, prev JE is N, and enough messages from the previous epoch get in to justify N+1. + # N+1 now becomes the JE. Not enough messages from epoch N+2 itself get in to justify N+2 prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True) blocks += new_blocks # rule 2 check_finality(test_state, prev_state, True, False, True) + # In epoch N+3, LJE is N+1, prev LJE is N, and enough messages get in to justify epochs N+2 and N+3. 
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, True) blocks += new_blocks # rule 3 From 53b06745330e1b44586008bd9e5cfbe4f51ccea1 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 11:45:41 -0600 Subject: [PATCH 475/481] remove unnecessary var --- test_libs/pyspec/tests/test_finality.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_libs/pyspec/tests/test_finality.py b/test_libs/pyspec/tests/test_finality.py index 9992d2a78..ca048c2b2 100644 --- a/test_libs/pyspec/tests/test_finality.py +++ b/test_libs/pyspec/tests/test_finality.py @@ -52,7 +52,7 @@ def next_epoch_with_attestations(state, fill_prev_epoch): post_state = deepcopy(state) blocks = [] - for slot in range(spec.SLOTS_PER_EPOCH): + for _ in range(spec.SLOTS_PER_EPOCH): block = build_empty_block_for_next_slot(post_state) if fill_cur_epoch: slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 From 7e6a69dfaa1d03b3f260cbc42bfbdb26973ac904 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 12:31:27 -0600 Subject: [PATCH 476/481] scale number of validators in tests based on number of slots --- test_libs/pyspec/tests/conftest.py | 4 ++-- test_libs/pyspec/tests/helpers.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test_libs/pyspec/tests/conftest.py b/test_libs/pyspec/tests/conftest.py index f6c70d280..9840dc7b2 100644 --- a/test_libs/pyspec/tests/conftest.py +++ b/test_libs/pyspec/tests/conftest.py @@ -22,8 +22,8 @@ def config(request): @pytest.fixture -def num_validators(): - return 100 +def num_validators(config): + return spec.SLOTS_PER_EPOCH * 8 @pytest.fixture diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py index a728a6acc..b30aaf35e 100644 --- a/test_libs/pyspec/tests/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -48,7 +48,7 @@ from eth2spec.utils.merkle_minimal import ( ) -privkeys = [i + 1 for i in range(1000)] +privkeys = [i + 1 for i in range(1024)] pubkeys = [bls.privtopub(privkey) for privkey in privkeys] pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)} From 2b999a6c7e5578471f82b1442ff8633506a858ee Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 13:15:06 -0600 Subject: [PATCH 477/481] fix off by one error for eth1 data voting --- specs/core/0_beacon-chain.md | 2 +- test_libs/pyspec/tests/test_sanity.py | 46 +++++++++++++++++++-------- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index 8eebc1618..c09182ffe 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -1726,7 +1726,7 @@ def process_final_updates(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = current_epoch + 1 # Reset eth1 data votes - if state.slot % SLOTS_PER_ETH1_VOTING_PERIOD == 0: + if (state.slot + 1) % SLOTS_PER_ETH1_VOTING_PERIOD == 0: state.eth1_data_votes = [] # Update effective balances with hysteresis for index, validator in enumerate(state.validator_registry): diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index 9258e851d..d018fc25f 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -376,17 +376,15 @@ def test_transfer(state): def test_balance_driven_status_transitions(state): - pre_state = deepcopy(state) + current_epoch = get_current_epoch(state) + validator_index = 
get_active_validator_indices(state, current_epoch)[-1] - current_epoch = get_current_epoch(pre_state) - validator_index = get_active_validator_indices(pre_state, current_epoch)[-1] - - assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH # set validator balance to below ejection threshold - pre_state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE + state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE - post_state = deepcopy(pre_state) + post_state = deepcopy(state) # # trigger epoch transition # @@ -396,14 +394,13 @@ def test_balance_driven_status_transitions(state): assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH - return pre_state, [block], post_state + return state, [block], post_state def test_historical_batch(state): - pre_state = deepcopy(state) - pre_state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (pre_state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1 + state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1 - post_state = deepcopy(pre_state) + post_state = deepcopy(state) block = build_empty_block_for_next_slot(post_state) @@ -411,6 +408,29 @@ def test_historical_batch(state): assert post_state.slot == block.slot assert get_current_epoch(post_state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0 - assert len(post_state.historical_roots) == len(pre_state.historical_roots) + 1 + assert len(post_state.historical_roots) == len(state.historical_roots) + 1 - return pre_state, [block], post_state + return state, [block], post_state + + +def test_eth1_data_votes(state): + post_state = deepcopy(state) + + expected_votes = 0 + assert len(state.eth1_data_votes) == expected_votes + + blocks = [] + for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1): + block = build_empty_block_for_next_slot(post_state) + state_transition(post_state, block) + expected_votes += 1 + assert len(post_state.eth1_data_votes) == expected_votes + blocks.append(block) + + block = build_empty_block_for_next_slot(post_state) + state_transition(post_state, block) + + assert post_state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0 + assert len(post_state.eth1_data_votes) == 1 + + return state, blocks, post_state From bd5096074000efe7cf354ec53c51d1787bd2efd6 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 13:20:55 -0600 Subject: [PATCH 478/481] tiny fix to test ouptut --- test_libs/pyspec/tests/test_sanity.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py index d018fc25f..b7d31f122 100644 --- a/test_libs/pyspec/tests/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -429,6 +429,7 @@ def test_eth1_data_votes(state): block = build_empty_block_for_next_slot(post_state) state_transition(post_state, block) + blocks.append(block) assert post_state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0 assert len(post_state.eth1_data_votes) == 1 From ca61118608cab91dcd8e32f199d9403324f8fda4 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 13:41:41 -0600 Subject: [PATCH 479/481] update readme to new phase 0 docs --- README.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b2b369e11..fdd284845 100644 --- a/README.md +++ b/README.md @@ -10,15 +10,20 @@ This repo hosts the current 
eth2.0 specifications. Discussions about design rati ## Specs Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are: -* [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md) -* [Phase 1 -- Custody Game](specs/core/1_custody-game.md) -* [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md) + +* Phase 0 + * [The Beacon Chain](specs/core/0_beacon-chain.md) + * [Fork Choice](specs/core/0_fork-choice.md) + * [Deposit Contract](specs/core/0_deposit-contract.md) + * [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md) +* Phase 1 + * [Phase 1 -- Custody Game](specs/core/1_custody-game.md) + * [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md) Accompanying documents can be found in [specs](specs) and include: * [SimpleSerialize (SSZ) spec](specs/simple-serialize.md) * [BLS signature verification](specs/bls_signature.md) * [General test format](specs/test_formats/README.md) -* [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md) * [Merkle proof formats](specs/light_client/merkle_proofs.md) * [Light client syncing protocol](specs/light_client/sync_protocol.md) From 216002053626858bc9aa939ac4679573bc89e1fd Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 13:43:07 -0600 Subject: [PATCH 480/481] update readme --- README.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index fdd284845..2c262de41 100644 --- a/README.md +++ b/README.md @@ -11,16 +11,18 @@ This repo hosts the current eth2.0 specifications. Discussions about design rati Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. 
The current phases specified are: -* Phase 0 - * [The Beacon Chain](specs/core/0_beacon-chain.md) - * [Fork Choice](specs/core/0_fork-choice.md) - * [Deposit Contract](specs/core/0_deposit-contract.md) - * [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md) -* Phase 1 - * [Phase 1 -- Custody Game](specs/core/1_custody-game.md) - * [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md) +### Phase 0 +* [The Beacon Chain](specs/core/0_beacon-chain.md) +* [Fork Choice](specs/core/0_fork-choice.md) +* [Deposit Contract](specs/core/0_deposit-contract.md) +* [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md) + +### Phase 1 +* [Phase 1 -- Custody Game](specs/core/1_custody-game.md) +* [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md) + +### Accompanying documents can be found in [specs](specs) and include: -Accompanying documents can be found in [specs](specs) and include: * [SimpleSerialize (SSZ) spec](specs/simple-serialize.md) * [BLS signature verification](specs/bls_signature.md) * [General test format](specs/test_formats/README.md) From f7f8ecb7abbddfe465ab3a9f7363119af4cfca64 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Apr 2019 13:43:45 -0600 Subject: [PATCH 481/481] update readme --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2c262de41..ad7204f21 100644 --- a/README.md +++ b/README.md @@ -18,8 +18,8 @@ Core specifications for eth2.0 client validation can be found in [specs/core](sp * [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md) ### Phase 1 -* [Phase 1 -- Custody Game](specs/core/1_custody-game.md) -* [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md) +* [Custody Game](specs/core/1_custody-game.md) +* [Shard Data Chains](specs/core/1_shard-data-chains.md) ### Accompanying documents can be found in [specs](specs) and include:
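
A note on the reasoning behind the off-by-one fix in `[PATCH 477/481]` above: `process_final_updates` runs during per-epoch processing, which in this version of the spec happens at the end of slot `s` whenever `(s + 1) % SLOTS_PER_EPOCH == 0`. Because `SLOTS_PER_ETH1_VOTING_PERIOD` is a multiple of `SLOTS_PER_EPOCH`, a reset condition written as `state.slot % SLOTS_PER_ETH1_VOTING_PERIOD == 0` can never be true at one of those slots once `GENESIS_SLOT` is `0`, so the vote list would never be cleared. Below is a minimal standalone timing sketch, not part of the patches themselves, using assumed illustrative values `SLOTS_PER_EPOCH = 8` and `SLOTS_PER_ETH1_VOTING_PERIOD = 16` (a multiple of the epoch length, as in the presets).

```python
# Standalone timing sketch -- illustrative only, with assumed constant values.
SLOTS_PER_EPOCH = 8
SLOTS_PER_ETH1_VOTING_PERIOD = 16

# Per-epoch processing (which includes process_final_updates) runs at the end of
# slot s whenever (s + 1) % SLOTS_PER_EPOCH == 0, before advancing to slot s + 1.
epoch_processing_slots = [s for s in range(64) if (s + 1) % SLOTS_PER_EPOCH == 0]

# Old reset condition: state.slot % SLOTS_PER_ETH1_VOTING_PERIOD == 0
old_reset_slots = [s for s in epoch_processing_slots
                   if s % SLOTS_PER_ETH1_VOTING_PERIOD == 0]

# Corrected condition from PATCH 477: (state.slot + 1) % SLOTS_PER_ETH1_VOTING_PERIOD == 0
new_reset_slots = [s for s in epoch_processing_slots
                   if (s + 1) % SLOTS_PER_ETH1_VOTING_PERIOD == 0]

# The old check never coincides with an epoch-processing slot, so eth1_data_votes
# would never be cleared; the corrected check clears the votes at the last slot
# of every voting period (15, 31, 47, ...).
assert old_reset_slots == []
assert new_reset_slots == [15, 31, 47, 63]
print("old condition fires at:", old_reset_slots)
print("new condition fires at:", new_reset_slots)
```

This is consistent with `test_eth1_data_votes` in the same patch: after `SLOTS_PER_ETH1_VOTING_PERIOD` blocks, the vote list has already been cleared during the final epoch transition of the period and contains only the single vote from the newest block.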