From 0dec828d89c522aa2048e47f681eccb41b5fa282 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 11 Mar 2021 18:33:36 +0600 Subject: [PATCH 001/127] Add initial merge spec --- specs/merge/beacon-chain.md | 257 ++++++++++++++++++++++++++++++++++++ specs/merge/fork-choice.md | 107 +++++++++++++++ specs/merge/validator.md | 77 +++++++++++ 3 files changed, 441 insertions(+) create mode 100644 specs/merge/beacon-chain.md create mode 100644 specs/merge/fork-choice.md create mode 100644 specs/merge/validator.md diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md new file mode 100644 index 000000000..a235dd518 --- /dev/null +++ b/specs/merge/beacon-chain.md @@ -0,0 +1,257 @@ +# Ethereum 2.0 The Merge + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Constants](#constants) + - [Execution](#execution) +- [Containers](#containers) + - [Extended containers](#extended-containers) + - [`BeaconBlockBody`](#beaconblockbody) + - [`BeaconState`](#beaconstate) + - [New containers](#new-containers) + - [`Transaction`](#transaction) + - [`ApplicationPayload`](#applicationpayload) +- [Helper functions](#helper-functions) + - [Misc](#misc) + - [`compute_randao_mix`](#compute_randao_mix) + - [`compute_time_at_slot`](#compute_time_at_slot) + - [Beacon state accessors](#beacon-state-accessors) + - [`get_recent_beacon_block_roots`](#get_recent_beacon_block_roots) + - [`get_evm_beacon_block_roots`](#get_evm_beacon_block_roots) + - [Block processing](#block-processing) + - [Modified `process_eth1_data`](#modified-process_eth1_data) + - [Application payload processing](#application-payload-processing) + - [`BeaconChainData`](#beaconchaindata) + - [`get_application_state`](#get_application_state) + - [`application_state_transition`](#application_state_transition) + - [`process_application_payload`](#process_application_payload) + + + + +## Introduction + +This is a 
patch implementing executable beacon chain proposal. +It enshrines application execution and validity as a first class citizen at the core of the beacon chain. + +## Constants + +### Execution + +| Name | Value | +| - | - | +| `MAX_BYTES_PER_TRANSACTION_PAYLOAD` | `2**20` | +| `MAX_APPLICATION_TRANSACTIONS` | `2**14` | +| `BYTES_PER_LOGS_BLOOM` | `2**8` | +| `EVM_BLOCK_ROOTS_SIZE` | `2**8` | + + +## Containers + +### Extended containers + +*Note*: Extended SSZ containers inherit all fields from the parent in the original +order and append any additional fields to the end. + +#### `BeaconBlockBody` + +*Note*: `BeaconBlockBody` fields remain unchanged other than the addition of `application_payload`. + +```python +class BeaconBlockBody(phase0.BeaconBlockBody): + application_payload: ApplicationPayload # User execution payload +``` + +#### `BeaconState` + +*Note*: `BeaconState` fields remain unchanged other than the removal of `eth1_data_votes` and addition of `application_state_root`. +The latter stores the root hash of ethereum application state. 
+ +```python +class BeaconState(Container): + # Versioning + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + # History + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + # Eth1 + eth1_data: Eth1Data + # [removed] eth1_data_votes + eth1_deposit_index: uint64 + # [new] Hash of the root of application state + application_state_root: Bytes32 + # [new] Hash of recent application block + application_block_hash: Bytes32 + # Registry + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + # Randomness + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + # Slashings + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances + # Attestations + previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] + current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] + # Finality + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch + previous_justified_checkpoint: Checkpoint # Previous epoch snapshot + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint +``` + +### New containers + +#### `Transaction` + +Application transaction fields structured as an SSZ object for inclusion in an `ApplicationPayload` contained within a `BeaconBlock`. + +```python +class Transaction(Container): + nonce: uint64 + gas_price: uint256 + gas_limit: uint64 + recipient: Bytes20 + value: uint256 + input: List[Bytes1, MAX_BYTES_PER_TRANSACTION_PAYLOAD] + v: uint256 + r: uint256 + s: uint256 +``` + +#### `ApplicationPayload` + +The application payload included in a `BeaconBlock`. 
+ +```python +class ApplicationPayload(Container): + block_hash: Bytes32 # Hash of application block + coinbase: Bytes20 + state_root: Bytes32 + gas_limit: uint64 + gas_used: uint64 + receipt_root: Bytes32 + logs_bloom: Vector[Bytes1, BYTES_PER_LOGS_BLOOM] + difficulty: uint64 # Temporary field, will be removed later on + transactions: List[Transaction, MAX_APPLICATION_TRANSACTIONS] +``` + +## Helper functions + +### Misc + +#### `compute_randao_mix` + +```python +def compute_randao_mix(state: BeaconState, randao_reveal: BLSSignature) -> Bytes32: + epoch = get_current_epoch(state) + return xor(get_randao_mix(state, epoch), hash(randao_reveal)) +``` + +#### `compute_time_at_slot` + +```python +def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: + return uint64(state.genesis_time + slot * SECONDS_PER_SLOT) +``` + +### Beacon state accessors + +#### `get_recent_beacon_block_roots` + +```python +def get_recent_beacon_block_roots(state: BeaconState, qty: uint64) -> Sequence[Bytes32]: + return [get_block_root_at_slot(state.slot - i) if GENESIS_SLOT + i < state.slot else Bytes32() for i in reversed(range(1, qty + 1))] +``` + +#### `get_evm_beacon_block_roots` + +```python +def get_evm_beacon_block_roots(state: BeaconState) -> Sequence[Bytes32]: + num_block_roots = min(BLOCK_ROOTS_FOR_EVM_SIZE, SLOTS_PER_HISTORICAL_ROOT) + return get_recent_beacon_block_roots(state, num_block_roots) +``` + +### Block processing + +```python +def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + process_randao(state, block.body) + process_eth1_data(state, block.body) # [Modified in The Merge] + process_operations(state, block.body) + process_application_payload(state, block.body) # [New in The Merge] +``` + +#### Modified `process_eth1_data` + +*Note*: The function `process_eth1_data` is modified to update `state.eth1_data` with `eth1_data` of each block. 
+ +```python +def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: + state.eth1_data = body.eth1_data +``` + +#### Application payload processing + +##### `BeaconChainData` + +*Note*: `BeaconChainData` contains beacon state data that is used by the application state transition function. + +```python +class BeaconChainData(Container): + slot: Slot + randao_mix: Bytes32 + timestamp: uint64 + recent_block_roots: Sequence[Bytes32] +``` + +##### `get_application_state` + +*Note*: `ApplicationState` class is an abstract class representing ethereum application state. + +Let `get_application_state(application_state_root: Bytes32) -> ApplicationState` be the function that given the root hash returns a copy of ethereum application state. +The body of the function is implementation dependant. + +##### `application_state_transition` + +Let `application_state_transition(application_state: ApplicationState, beacon_chain_data: BeaconChainData, application_payload: ApplicationPayload) -> None` be the transition function of ethereum application state. +The body of the function is implementation dependant. + +*Note*: `application_state_transition` must throw `AssertionError` if either transition itself or post-transition verifications has failed. 
+ +##### `process_application_payload` + +```python +def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> None: + """ + Note: This function is designed to be able to be run in parallel with + the other `process_block` sub-functions + """ + + # Utilizes `compute_randao_mix` to avoid any assumptions about + # the processing of other `process_block` sub-functions + beacon_chain_data = BeaconChainData( + slot=state.slot, + randao_mix=compute_randao_mix(state, body.randao_reveal), + timestamp=compute_time_at_slot(state.genesis_time, state.slot), + recent_block_roots=get_evm_beacon_block_roots(state) + ) + + application_state = get_application_state(state.application_state_root) + application_state_transition(application_state, beacon_chain_data, body.application_payload) + + state.application_state_root = body.application_payload.state_root + state.application_block_hash = body.application_payload.block_hash +``` diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md new file mode 100644 index 000000000..9d8f175d0 --- /dev/null +++ b/specs/merge/fork-choice.md @@ -0,0 +1,107 @@ +# Ethereum 2.0 The Merge + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + +- [Introduction](#introduction) + - [Helpers](#helpers) + - [`get_eth1_data`](#get_eth1_data) + - [`is_valid_eth1_data`](#is_valid_eth1_data) + - [Updated fork-choice handlers](#updated-fork-choice-handlers) + - [`on_block`](#on_block) + + + + +## Introduction + +This is the modification of the fork choice according to the executable beacon chain proposal. + +*Note*: It introduces the following change. `Eth1Data` included in a block must correspond to the application state produced by the parent block. This acts as an additional filter on the block subtree under consideration for the beacon block fork choice. 
+ +### Helpers + +#### `get_eth1_data` + +Let `get_eth1_data(state: BeaconState) -> Eth1Data` be the function that returns the `Eth1Data` obtained from the beacon state. + +*Note*: This is mostly a function of the state of the beacon chain deposit contract. It can be read from the application state and/or logs. The `block_hash` value of `Eth1Data` must be set to `state.application_block_hash`. + +#### `is_valid_eth1_data` + +Used by fork-choice handler, `on_block` + +```python +def is_valid_eth1_data(store: Store, block: BeaconBlock) -> boolean: + parent_state = store.block_states[block.parent_root] + expected_eth1_data = get_eth1_data(parent_state) + actual_eth1_data = block.body.eth1_data + + is_correct_root = expected_eth1_data.deposit_root == actual_eth1_data.deposit_root + is_correct_count = expected_eth1_data.deposit_count == actual_eth1_data.deposit_count + is_correct_block_hash = expected_eth1_data.block_hash == actual_eth1_data.block_hash + return is_correct_root and is_correct_count and is_correct_block_hash +``` + +### Updated fork-choice handlers + +#### `on_block` + +*Note*: The only modification is the addition of the `Eth1Data` validity assumption. + +```python +def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: + block = signed_block.message + # Parent block must be known + assert block.parent_root in store.block_states + # Make a copy of the state to avoid mutability issues + pre_state = copy(store.block_states[block.parent_root]) + # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past. 
+ assert get_current_slot(store) >= block.slot + + # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert block.slot > finalized_slot + # Check block is a descendant of the finalized block at the checkpoint finalized slot + assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root + + # [Added] Check that Eth1 data is correct + assert is_valid_eth1_data(store, block) + + # Check the block is valid and compute the post-state + state = pre_state.copy() + state_transition(state, signed_block, True) + # Add new block to the store + store.blocks[hash_tree_root(block)] = block + # Add new state for this block to the store + store.block_states[hash_tree_root(block)] = state + + # Update justified checkpoint + if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: + if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: + store.best_justified_checkpoint = state.current_justified_checkpoint + if should_update_justified_checkpoint(store, state.current_justified_checkpoint): + store.justified_checkpoint = state.current_justified_checkpoint + + # Update finalized checkpoint + if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: + store.finalized_checkpoint = state.finalized_checkpoint + + # Potentially update justified if different from store + if store.justified_checkpoint != state.current_justified_checkpoint: + # Update justified if new justified is later than store justified + if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: + store.justified_checkpoint = state.current_justified_checkpoint + return + + # Update justified if store justified is not in chain with finalized checkpoint + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + ancestor_at_finalized_slot = 
get_ancestor(store, store.justified_checkpoint.root, finalized_slot) + if ancestor_at_finalized_slot != store.finalized_checkpoint.root: + store.justified_checkpoint = state.current_justified_checkpoint +``` + diff --git a/specs/merge/validator.md b/specs/merge/validator.md new file mode 100644 index 000000000..d55ec1c8f --- /dev/null +++ b/specs/merge/validator.md @@ -0,0 +1,77 @@ +# Ethereum 2.0 Phase 1 -- Honest Validator + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Prerequisites](#prerequisites) +- [Beacon chain responsibilities](#beacon-chain-responsibilities) + - [Block proposal](#block-proposal) + - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) + - [Eth1 data](#eth1-data) + - [`get_eth1_data`](#get_eth1_data) + - [Application Payload](#application-payload) + - [`produce_application_payload`](#produce_application_payload) + + + + +## Introduction + +This document represents the changes to be made in the code of an "honest validator" to implement executable beacon chain proposal. + +## Prerequisites + +This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden. + +All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [The Merge](./beacon-chain.md) are requisite for this document and used throughout. Please see related Beacon Chain doc before continuing and use them as a reference throughout. + +## Beacon chain responsibilities + +All validator responsibilities remain unchanged other than those noted below. Namely, the modification of `Eth1Data` and the addition of `ApplicationPayload`. 
+ +### Block proposal + +#### Constructing the `BeaconBlockBody` + +##### Eth1 data + +The `block.body.eth1_data` field is for block proposers to publish recent Eth1 data. This recent data contains deposit root (as calculated by the get_deposit_root() method of the deposit contract) and deposit count after processing of the parent block. The fork choice verifies Eth1 data of a block, then `state.eth1_data` updates immediately allowing new deposits to be processed. Each deposit in `block.body.deposits` must verify against `state.eth1_data.deposit_root`. + +###### `get_eth1_data` + +Let `get_eth1_data(state: BeaconState) -> Eth1Data` be the function that returns the `Eth1Data` obtained from the beacon state. + +*Note*: This is mostly a function of the state of the beacon chain deposit contract. It can be read from the application state and/or logs. The `block_hash` value of `Eth1Data` must be set to `state.application_block_hash`. + +Set `block.body.eth1_data = get_eth1_data(state)`. + + +##### Application Payload + +###### `produce_application_payload` + +Let `produce_application_payload(parent_hash: Bytes32, beacon_chain_data: BeaconChainData) -> ApplicationPayload` be the function that produces new instance of application payload. 
+ + +* Let `randao_reveal` be `block.body.randao_reveal` of the block that is being produced +* Set `block.body.application_payload = get_application_payload(state, randao_reveal)` where: + +```python +def get_application_payload(state: BeaconState, randao_reveal: BLSSignature) -> ApplicationPayload: + application_parent_hash = state.application_block_hash + beacon_chain_data = BeaconChainData( + slot=state.slot, + randao_mix=compute_randao_mix(state, randao_reveal), + timestamp=compute_time_at_slot(state.genesis_time, state.slot), + recent_block_roots=get_evm_beacon_block_roots(state) + ) + + return produce_application_payload(application_parent_hash, beacon_chain_data) +``` + From ee161634b203eb6eab8f108ab03c0f9a4a2fb781 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 11 Mar 2021 18:52:31 +0600 Subject: [PATCH 002/127] Polish beacon chain spec and validator guide --- specs/merge/beacon-chain.md | 16 ++++++++-------- specs/merge/validator.md | 7 +++---- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index a235dd518..4ded574f0 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -66,13 +66,13 @@ order and append any additional fields to the end. ```python class BeaconBlockBody(phase0.BeaconBlockBody): - application_payload: ApplicationPayload # User execution payload + application_payload: ApplicationPayload # [Added] application payload ``` #### `BeaconState` -*Note*: `BeaconState` fields remain unchanged other than the removal of `eth1_data_votes` and addition of `application_state_root`. -The latter stores the root hash of ethereum application state. +*Note*: `BeaconState` fields remain unchanged other than the removal of `eth1_data_votes` and addition of `application_state_root` and `application_block_hash`. 
+ ```python class BeaconState(Container): @@ -88,11 +88,11 @@ class BeaconState(Container): historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] # Eth1 eth1_data: Eth1Data - # [removed] eth1_data_votes + # [Removed] eth1_data_votes eth1_deposit_index: uint64 - # [new] Hash of the root of application state + # [Added] hash of the root of application state application_state_root: Bytes32 - # [new] Hash of recent application block + # [Added] hash of recent application block application_block_hash: Bytes32 # Registry validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] @@ -179,7 +179,7 @@ def get_recent_beacon_block_roots(state: BeaconState, qty: uint64) -> Sequence[B ```python def get_evm_beacon_block_roots(state: BeaconState) -> Sequence[Bytes32]: - num_block_roots = min(BLOCK_ROOTS_FOR_EVM_SIZE, SLOTS_PER_HISTORICAL_ROOT) + num_block_roots = min(EVM_BLOCK_ROOTS_SIZE, SLOTS_PER_HISTORICAL_ROOT) return get_recent_beacon_block_roots(state, num_block_roots) ``` @@ -229,7 +229,7 @@ The body of the function is implementation dependant. Let `application_state_transition(application_state: ApplicationState, beacon_chain_data: BeaconChainData, application_payload: ApplicationPayload) -> None` be the transition function of ethereum application state. The body of the function is implementation dependant. -*Note*: `application_state_transition` must throw `AssertionError` if either transition itself or post-transition verifications has failed. +*Note*: `application_state_transition` must throw `AssertionError` if either the transition itself or one of the post-transition verifications has failed. ##### `process_application_payload` diff --git a/specs/merge/validator.md b/specs/merge/validator.md index d55ec1c8f..75e804187 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Honest Validator +# Ethereum 2.0 The Merge **Notice**: This document is a work-in-progress for researchers and implementers. 
@@ -41,7 +41,7 @@ All validator responsibilities remain unchanged other than those noted below. Na ##### Eth1 data -The `block.body.eth1_data` field is for block proposers to publish recent Eth1 data. This recent data contains deposit root (as calculated by the get_deposit_root() method of the deposit contract) and deposit count after processing of the parent block. The fork choice verifies Eth1 data of a block, then `state.eth1_data` updates immediately allowing new deposits to be processed. Each deposit in `block.body.deposits` must verify against `state.eth1_data.deposit_root`. +The `block.body.eth1_data` field is for block proposers to publish recent Eth1 data. This recent data contains deposit root (as calculated by the `get_deposit_root()` method of the deposit contract) and deposit count after processing of the parent block. The fork choice verifies Eth1 data of a block, then `state.eth1_data` updates immediately allowing new deposits to be processed. Each deposit in `block.body.deposits` must verify against `state.eth1_data.deposit_root`. ###### `get_eth1_data` @@ -57,7 +57,7 @@ Set `block.body.eth1_data = get_eth1_data(state)`. ###### `produce_application_payload` Let `produce_application_payload(parent_hash: Bytes32, beacon_chain_data: BeaconChainData) -> ApplicationPayload` be the function that produces new instance of application payload. - +The body of this function is implementation dependant. 
* Let `randao_reveal` be `block.body.randao_reveal` of the block that is being produced * Set `block.body.application_payload = get_application_payload(state, randao_reveal)` where: @@ -74,4 +74,3 @@ def get_application_payload(state: BeaconState, randao_reveal: BLSSignature) -> return produce_application_payload(application_parent_hash, beacon_chain_data) ``` - From f6f36872d82d15e6b3ee2a9afc0fd949f4e9ad13 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 17 Mar 2021 15:19:58 +0600 Subject: [PATCH 003/127] Index from GENESIS_SLOT in compute_time_at_slot Co-authored-by: Paul Hauner --- specs/merge/beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 4ded574f0..e12a89e20 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -163,7 +163,8 @@ def compute_randao_mix(state: BeaconState, randao_reveal: BLSSignature) -> Bytes ```python def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: - return uint64(state.genesis_time + slot * SECONDS_PER_SLOT) + slots_since_genesis = slot - GENESIS_SLOT + return uint64(state.genesis_time + slots_since_genesis * SECONDS_PER_SLOT) ``` ### Beacon state accessors From 3fb5f2ec8132febcc48ea66c4463b5e0c8a76809 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 17 Mar 2021 15:20:52 +0600 Subject: [PATCH 004/127] Use Vector struct for recent_block_roots field Co-authored-by: Paul Hauner --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index e12a89e20..e2ed48b3e 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -215,7 +215,7 @@ class BeaconChainData(Container): slot: Slot randao_mix: Bytes32 timestamp: uint64 - recent_block_roots: Sequence[Bytes32] + recent_block_roots: Vector[Bytes32, EVM_BLOCK_ROOTS_SIZE] ``` ##### `get_application_state` From 
5435324693ba0e24ebd26eb7c948f48c30bc5ac1 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 17 Mar 2021 15:21:27 +0600 Subject: [PATCH 005/127] Add a line break in get_recent_beacon_block_roots Co-authored-by: Danny Ryan --- specs/merge/beacon-chain.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index e2ed48b3e..4d5e42920 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -173,7 +173,10 @@ def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: ```python def get_recent_beacon_block_roots(state: BeaconState, qty: uint64) -> Sequence[Bytes32]: - return [get_block_root_at_slot(state.slot - i) if GENESIS_SLOT + i < state.slot else Bytes32() for i in reversed(range(1, qty + 1))] + return [ + get_block_root_at_slot(state.slot - i) if GENESIS_SLOT + i < state.slot else Bytes32() + for i in reversed(range(1, qty + 1)) + ] ``` #### `get_evm_beacon_block_roots` From 3c9cd855a062db604d9056b10bc316cecd27b0fb Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 17 Mar 2021 15:21:47 +0600 Subject: [PATCH 006/127] Fix spelling Co-authored-by: Danny Ryan --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 4d5e42920..7f27e80e1 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -226,7 +226,7 @@ class BeaconChainData(Container): *Note*: `ApplicationState` class is an abstract class representing ethereum application state. Let `get_application_state(application_state_root: Bytes32) -> ApplicationState` be the function that given the root hash returns a copy of ethereum application state. -The body of the function is implementation dependant. +The body of the function is implementation dependent. 
##### `application_state_transition` From a368f5d2240dd6648dce3afd9f458f1e77239635 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 17 Mar 2021 15:31:21 +0600 Subject: [PATCH 007/127] Lable Added/Remove notes with Merge explicitly --- specs/merge/beacon-chain.md | 12 ++++++------ specs/merge/fork-choice.md | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 7f27e80e1..1ae96886e 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -66,7 +66,7 @@ order and append any additional fields to the end. ```python class BeaconBlockBody(phase0.BeaconBlockBody): - application_payload: ApplicationPayload # [Added] application payload + application_payload: ApplicationPayload # [Added in Merge] application payload ``` #### `BeaconState` @@ -88,11 +88,11 @@ class BeaconState(Container): historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] # Eth1 eth1_data: Eth1Data - # [Removed] eth1_data_votes + # [Removed in Merge] eth1_data_votes eth1_deposit_index: uint64 - # [Added] hash of the root of application state + # [Added in Merge] hash of the root of application state application_state_root: Bytes32 - # [Added] hash of recent application block + # [Added in Merge] hash of recent application block application_block_hash: Bytes32 # Registry validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] @@ -193,9 +193,9 @@ def get_evm_beacon_block_roots(state: BeaconState) -> Sequence[Bytes32]: def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) process_randao(state, block.body) - process_eth1_data(state, block.body) # [Modified in The Merge] + process_eth1_data(state, block.body) # [Modified in Merge] process_operations(state, block.body) - process_application_payload(state, block.body) # [New in The Merge] + process_application_payload(state, block.body) # [New in Merge] ``` #### Modified `process_eth1_data` diff --git 
a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 9d8f175d0..2870e39b0 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -69,7 +69,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check block is a descendant of the finalized block at the checkpoint finalized slot assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root - # [Added] Check that Eth1 data is correct + # [Added in Merge] Check that Eth1 data is correct assert is_valid_eth1_data(store, block) # Check the block is valid and compute the post-state From b8e16c1610e4af03b38baf6ad98a956aa8317a08 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 17 Mar 2021 15:32:24 +0600 Subject: [PATCH 008/127] Remove min(..., ...) in get_evm_beacon_block_roots --- specs/merge/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 1ae96886e..364cc4377 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -183,8 +183,8 @@ def get_recent_beacon_block_roots(state: BeaconState, qty: uint64) -> Sequence[B ```python def get_evm_beacon_block_roots(state: BeaconState) -> Sequence[Bytes32]: - num_block_roots = min(EVM_BLOCK_ROOTS_SIZE, SLOTS_PER_HISTORICAL_ROOT) - return get_recent_beacon_block_roots(state, num_block_roots) + # EVM_BLOCK_ROOTS_SIZE must be less or equal to SLOTS_PER_HISTORICAL_ROOT + return get_recent_beacon_block_roots(state, EVM_BLOCK_ROOTS_SIZE) ``` ### Block processing From bf151641a73aaab16b79942ad635652051b5c07d Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 17 Mar 2021 22:39:35 +0600 Subject: [PATCH 009/127] Add rebase-to-Altair warning --- specs/merge/beacon-chain.md | 2 ++ specs/merge/validator.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 364cc4377..043bc2afe 100644 --- 
a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -1,5 +1,7 @@ # Ethereum 2.0 The Merge +**Warning:** This document is based on [Phase 0](../phase0/beacon-chain.md) and considered to be rebased to [Altair](../altair/beacon-chain.md) once the latter is shipped. + **Notice**: This document is a work-in-progress for researchers and implementers. ## Table of contents diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 75e804187..aff4263b2 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -1,5 +1,7 @@ # Ethereum 2.0 The Merge +**Warning:** This document is based on [Phase 0](../phase0/validator.md) and considered to be rebased to [Altair](../altair/validator.md) once the latter is shipped. + **Notice**: This document is a work-in-progress for researchers and implementers. ## Table of contents From 46fc8a196ddd9ba03426d824d610c65ee1a13d6b Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Sat, 20 Mar 2021 19:21:11 +0600 Subject: [PATCH 010/127] Strip down the merge to the pure consensus upgrade --- specs/merge/beacon-chain.md | 104 ++++++++++-------------------------- specs/merge/fork-choice.md | 33 +++++------- specs/merge/validator.md | 53 +++++++++--------- 3 files changed, 67 insertions(+), 123 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 043bc2afe..8508fa06c 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -12,6 +12,7 @@ - [Introduction](#introduction) - [Constants](#constants) + - [Transition](#transition) - [Execution](#execution) - [Containers](#containers) - [Extended containers](#extended-containers) @@ -22,15 +23,10 @@ - [`ApplicationPayload`](#applicationpayload) - [Helper functions](#helper-functions) - [Misc](#misc) - - [`compute_randao_mix`](#compute_randao_mix) - - [`compute_time_at_slot`](#compute_time_at_slot) - - [Beacon state accessors](#beacon-state-accessors) - - 
[`get_recent_beacon_block_roots`](#get_recent_beacon_block_roots) - - [`get_evm_beacon_block_roots`](#get_evm_beacon_block_roots) + - [`is_transition_completed`](#is_transition_completed) + - [`is_transition_block`](#is_transition_block) - [Block processing](#block-processing) - - [Modified `process_eth1_data`](#modified-process_eth1_data) - [Application payload processing](#application-payload-processing) - - [`BeaconChainData`](#beaconchaindata) - [`get_application_state`](#get_application_state) - [`application_state_transition`](#application_state_transition) - [`process_application_payload`](#process_application_payload) @@ -45,6 +41,11 @@ It enshrines application execution and validity as a first class citizen at the ## Constants +### Transition +| Name | Value | +| - | - | +| `TRANSITION_TOTAL_DIFFICULTY` | _TBD_ | + ### Execution | Name | Value | @@ -52,7 +53,6 @@ It enshrines application execution and validity as a first class citizen at the | `MAX_BYTES_PER_TRANSACTION_PAYLOAD` | `2**20` | | `MAX_APPLICATION_TRANSACTIONS` | `2**14` | | `BYTES_PER_LOGS_BLOOM` | `2**8` | -| `EVM_BLOCK_ROOTS_SIZE` | `2**8` | ## Containers @@ -73,7 +73,7 @@ class BeaconBlockBody(phase0.BeaconBlockBody): #### `BeaconState` -*Note*: `BeaconState` fields remain unchanged other than the removal of `eth1_data_votes` and addition of `application_state_root` and `application_block_hash`. +*Note*: `BeaconState` fields remain unchanged other than addition of `application_state_root` and `application_block_hash`. 
```python @@ -90,7 +90,7 @@ class BeaconState(Container): historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] # Eth1 eth1_data: Eth1Data - # [Removed in Merge] eth1_data_votes + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] eth1_deposit_index: uint64 # [Added in Merge] hash of the root of application state application_state_root: Bytes32 @@ -153,40 +153,18 @@ class ApplicationPayload(Container): ### Misc -#### `compute_randao_mix` +#### `is_transition_completed` ```python -def compute_randao_mix(state: BeaconState, randao_reveal: BLSSignature) -> Bytes32: - epoch = get_current_epoch(state) - return xor(get_randao_mix(state, epoch), hash(randao_reveal)) +def is_transition_completed(state: BeaconState) -> Boolean: + state.application_block_hash != Bytes32() ``` -#### `compute_time_at_slot` +#### `is_transition_block` ```python -def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: - slots_since_genesis = slot - GENESIS_SLOT - return uint64(state.genesis_time + slots_since_genesis * SECONDS_PER_SLOT) -``` - -### Beacon state accessors - -#### `get_recent_beacon_block_roots` - -```python -def get_recent_beacon_block_roots(state: BeaconState, qty: uint64) -> Sequence[Bytes32]: - return [ - get_block_root_at_slot(state.slot - i) if GENESIS_SLOT + i < state.slot else Bytes32() - for i in reversed(range(1, qty + 1)) - ] -``` - -#### `get_evm_beacon_block_roots` - -```python -def get_evm_beacon_block_roots(state: BeaconState) -> Sequence[Bytes32]: - # EVM_BLOCK_ROOTS_SIZE must be less or equal to SLOTS_PER_HISTORICAL_ROOT - return get_recent_beacon_block_roots(state, EVM_BLOCK_ROOTS_SIZE) +def is_transition_block(state: BeaconState, block_body: BeaconBlockBody) -> boolean: + return state.application_block_hash == Bytes32() and block.body.application_payload.block_hash != Bytes32() ``` ### Block processing @@ -195,34 +173,13 @@ def get_evm_beacon_block_roots(state: BeaconState) -> Sequence[Bytes32]: def process_block(state: 
BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) process_randao(state, block.body) - process_eth1_data(state, block.body) # [Modified in Merge] + process_eth1_data(state, block.body) process_operations(state, block.body) process_application_payload(state, block.body) # [New in Merge] ``` -#### Modified `process_eth1_data` - -*Note*: The function `process_eth1_data` is modified to update `state.eth1_data` with `eth1_data` of each block. - -```python -def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: - state.eth1_data = body.eth1_data -``` - #### Application payload processing -##### `BeaconChainData` - -*Note*: `BeaconChainData` contains beacon state data that is used by the application state transition function. - -```python -class BeaconChainData(Container): - slot: Slot - randao_mix: Bytes32 - timestamp: uint64 - recent_block_roots: Vector[Bytes32, EVM_BLOCK_ROOTS_SIZE] -``` - ##### `get_application_state` *Note*: `ApplicationState` class is an abstract class representing ethereum application state. @@ -232,8 +189,8 @@ The body of the function is implementation dependent. ##### `application_state_transition` -Let `application_state_transition(application_state: ApplicationState, beacon_chain_data: BeaconChainData, application_payload: ApplicationPayload) -> None` be the transition function of ethereum application state. -The body of the function is implementation dependant. +Let `application_state_transition(application_state: ApplicationState, application_payload: ApplicationPayload) -> None` be the transition function of ethereum application state. +The body of the function is implementation dependent. *Note*: `application_state_transition` must throw `AssertionError` if either the transition itself or one of the post-transition verifications has failed. 
@@ -245,19 +202,14 @@ def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> No Note: This function is designed to be able to be run in parallel with the other `process_block` sub-functions """ - - # Utilizes `compute_randao_mix` to avoid any assumptions about - # the processing of other `process_block` sub-functions - beacon_chain_data = BeaconChainData( - slot=state.slot, - randao_mix=compute_randao_mix(state, body.randao_reveal), - timestamp=compute_time_at_slot(state.genesis_time, state.slot), - recent_block_roots=get_evm_beacon_block_roots(state) - ) - - application_state = get_application_state(state.application_state_root) - application_state_transition(application_state, beacon_chain_data, body.application_payload) - state.application_state_root = body.application_payload.state_root - state.application_block_hash = body.application_payload.block_hash + if is_transition_completed(state): + application_state = get_application_state(state.application_state_root) + application_state_transition(application_state, body.application_payload) + + state.application_state_root = body.application_payload.state_root + state.application_block_hash = body.application_payload.block_hash + + elif is_transition_block(state, body): + state.application_block_hash = body.application_payload.block_hash ``` diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 2870e39b0..150dba91c 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -9,8 +9,8 @@ - [Introduction](#introduction) - [Helpers](#helpers) - - [`get_eth1_data`](#get_eth1_data) - - [`is_valid_eth1_data`](#is_valid_eth1_data) + - [`get_total_difficulty`](#get_total_difficulty) + - [`is_valid_transition_block`](#is_valid_transition_block) - [Updated fork-choice handlers](#updated-fork-choice-handlers) - [`on_block`](#on_block) @@ -21,37 +21,31 @@ This is the modification of the fork choice according to the executable beacon chain proposal. 
-*Note*: It introduces the following change. `Eth1Data` included in a block must correspond to the application state produced by the parent block. This acts as an additional filter on the block subtree under consideration for the beacon block fork choice. +*Note*: It introduces the process of transition from the last PoW block to the first PoS block. ### Helpers -#### `get_eth1_data` +#### `get_total_difficulty` -Let `get_eth1_data(state: BeaconState) -> Eth1Data` be the function that returns the `Eth1Data` obtained from the beacon state. +Let `get_total_difficulty(hash: Bytes32) -> uint256` be the function that returns the total difficulty of the PoW block specified by its hash. -*Note*: This is mostly a function of the state of the beacon chain deposit contract. It can be read from the application state and/or logs. The `block_hash` value of `Eth1Data` must be set to `state.application_block_hash`. +*Note*: The function returns `0` if the block is either not yet processed or considered invalid. The latter two cases are considered indistinguishable to the current implementation of JSON-RPC. 
-#### `is_valid_eth1_data` +#### `is_valid_transition_block` Used by fork-choice handler, `on_block` ```python -def is_valid_eth1_data(store: Store, block: BeaconBlock) -> boolean: - parent_state = store.block_states[block.parent_root] - expected_eth1_data = get_eth1_data(parent_state) - actual_eth1_data = block.body.eth1_data - - is_correct_root = expected_eth1_data.deposit_root == actual_eth1_data.deposit_root - is_correct_count = expected_eth1_data.deposit_count == actual_eth1_data.deposit_count - is_correct_block_hash = expected_eth1_data.block_hash == actual_eth1_data.block_hash - return is_correct_root and is_correct_count and is_correct_block_hash +def is_valid_transition_block(block: BeaconBlock) -> boolean: + total_difficulty = get_total_difficulty(block.body.application_payload.block_hash) + return total_difficulty >= TRANSITION_TOTAL_DIFFICULTY ``` ### Updated fork-choice handlers #### `on_block` -*Note*: The only modification is the addition of the `Eth1Data` validity assumption. +*Note*: The only modification is the addition of the verification of transition block conditions. 
```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: @@ -69,8 +63,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check block is a descendant of the finalized block at the checkpoint finalized slot assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root - # [Added in Merge] Check that Eth1 data is correct - assert is_valid_eth1_data(store, block) + # [Added in Merge] Consider delaying the beacon block processing until PoW block is accepted by the application node + if is_transition_block(pre_state, block.body): + assert is_valid_transition_block(block) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/merge/validator.md b/specs/merge/validator.md index aff4263b2..d55f2ea3e 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -15,9 +15,9 @@ - [Beacon chain responsibilities](#beacon-chain-responsibilities) - [Block proposal](#block-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - - [Eth1 data](#eth1-data) - - [`get_eth1_data`](#get_eth1_data) - [Application Payload](#application-payload) + - [`ApplicaitonBlock`](#applicaitonblock) + - [`get_pow_chain_head`](#get_pow_chain_head) - [`produce_application_payload`](#produce_application_payload) @@ -35,44 +35,41 @@ All terminology, constants, functions, and protocol mechanics defined in the upd ## Beacon chain responsibilities -All validator responsibilities remain unchanged other than those noted below. Namely, the modification of `Eth1Data` and the addition of `ApplicationPayload`. +All validator responsibilities remain unchanged other than those noted below. Namely, the transition block handling and the addition of `ApplicationPayload`. ### Block proposal #### Constructing the `BeaconBlockBody` -##### Eth1 data - -The `block.body.eth1_data` field is for block proposers to publish recent Eth1 data. 
This recent data contains deposit root (as calculated by the `get_deposit_root()` method of the deposit contract) and deposit count after processing of the parent block. The fork choice verifies Eth1 data of a block, then `state.eth1_data` updates immediately allowing new deposits to be processed. Each deposit in `block.body.deposits` must verify against `state.eth1_data.deposit_root`. - -###### `get_eth1_data` - -Let `get_eth1_data(state: BeaconState) -> Eth1Data` be the function that returns the `Eth1Data` obtained from the beacon state. - -*Note*: This is mostly a function of the state of the beacon chain deposit contract. It can be read from the application state and/or logs. The `block_hash` value of `Eth1Data` must be set to `state.application_block_hash`. - -Set `block.body.eth1_data = get_eth1_data(state)`. - - ##### Application Payload +###### `ApplicaitonBlock` +```python +class PowBlock(Container): + block_hash: Bytes32 + total_difficulty: uint256 +``` + +###### `get_pow_chain_head` + +Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific. + ###### `produce_application_payload` -Let `produce_application_payload(parent_hash: Bytes32, beacon_chain_data: BeaconChainData) -> ApplicationPayload` be the function that produces new instance of application payload. -The body of this function is implementation dependant. +Let `produce_application_payload(parent_hash: Bytes32) -> ApplicationPayload` be the function that produces new instance of application payload. +The body of this function is implementation dependent. 
-* Let `randao_reveal` be `block.body.randao_reveal` of the block that is being produced * Set `block.body.application_payload = get_application_payload(state, randao_reveal)` where: ```python -def get_application_payload(state: BeaconState, randao_reveal: BLSSignature) -> ApplicationPayload: - application_parent_hash = state.application_block_hash - beacon_chain_data = BeaconChainData( - slot=state.slot, - randao_mix=compute_randao_mix(state, randao_reveal), - timestamp=compute_time_at_slot(state.genesis_time, state.slot), - recent_block_roots=get_evm_beacon_block_roots(state) - ) +def get_application_payload(state: BeaconState) -> ApplicationPayload: + if is_transition_completed(state): + application_parent_hash = state.application_block_hash + return produce_application_payload(application_parent_hash) - return produce_application_payload(application_parent_hash, beacon_chain_data) + pow_block = get_pow_chain_head() + if pow_block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY: + return ApplicationPayload(block_hash = pow_block.block_hash) + else: + return ApplicationPayload() ``` From 3420e51a0f28ef558198b24422c2f2f895b62abb Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Sat, 20 Mar 2021 21:46:20 +0600 Subject: [PATCH 011/127] Verify transition block to be assembled correctly --- specs/merge/beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 8508fa06c..661e0689d 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -211,5 +211,6 @@ def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> No state.application_block_hash = body.application_payload.block_hash elif is_transition_block(state, body): + assert body.application_payload == ApplicationPayload(block_hash = body.application_payload.block_hash) state.application_block_hash = body.application_payload.block_hash ``` From 24dc8a277fd732e64cd5c7fe8cb1168b8149860c Mon Sep 17 00:00:00 
2001 From: Mikhail Kalinin Date: Mon, 22 Mar 2021 20:54:44 +0600 Subject: [PATCH 012/127] Fix block_body variable in is_transition_block Co-authored-by: terence tsao --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 661e0689d..d083a3139 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -164,7 +164,7 @@ def is_transition_completed(state: BeaconState) -> Boolean: ```python def is_transition_block(state: BeaconState, block_body: BeaconBlockBody) -> boolean: - return state.application_block_hash == Bytes32() and block.body.application_payload.block_hash != Bytes32() + return state.application_block_hash == Bytes32() and block_body.application_payload.block_hash != Bytes32() ``` ### Block processing From 38a455c79f5803af8d84b4b14cadf55ff780bb8a Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 22 Mar 2021 20:58:34 +0600 Subject: [PATCH 013/127] Verify that ApplicationPayload is zeroed before the transition --- specs/merge/beacon-chain.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index d083a3139..710bc5935 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -213,4 +213,7 @@ def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> No elif is_transition_block(state, body): assert body.application_payload == ApplicationPayload(block_hash = body.application_payload.block_hash) state.application_block_hash = body.application_payload.block_hash + + else: + assert body.application_payload == ApplicationPayload() ``` From 83453d212ede8453b0d46bb591f67bc50ce16474 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 22 Mar 2021 21:14:31 +0600 Subject: [PATCH 014/127] Simplify merge.BeaconState definition --- specs/merge/beacon-chain.md | 31 +------------------------------ 1 file changed, 1 insertion(+), 30 
deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 710bc5935..b2813a2d9 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -77,40 +77,11 @@ class BeaconBlockBody(phase0.BeaconBlockBody): ```python -class BeaconState(Container): - # Versioning - genesis_time: uint64 - genesis_validators_root: Root - slot: Slot - fork: Fork - # History - latest_block_header: BeaconBlockHeader - block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] - state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] - historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] - # Eth1 - eth1_data: Eth1Data - eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] - eth1_deposit_index: uint64 +class BeaconState(phase0.BeaconState): # [Added in Merge] hash of the root of application state application_state_root: Bytes32 # [Added in Merge] hash of recent application block application_block_hash: Bytes32 - # Registry - validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] - balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] - # Randomness - randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] - # Slashings - slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances - # Attestations - previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] - current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] - # Finality - justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch - previous_justified_checkpoint: Checkpoint # Previous epoch snapshot - current_justified_checkpoint: Checkpoint - finalized_checkpoint: Checkpoint ``` ### New containers From 7e6ac4e7f7fe6d7399c72b54072f7a2313df1c30 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 22 Mar 2021 21:20:05 +0600 Subject: [PATCH 015/127] Boolean -> boolean --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index b2813a2d9..ca35679ed 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -127,7 +127,7 @@ class ApplicationPayload(Container): #### `is_transition_completed` ```python -def is_transition_completed(state: BeaconState) -> Boolean: +def is_transition_completed(state: BeaconState) -> boolean: state.application_block_hash != Bytes32() ``` From 96de910b22ba2364f1badc61f0e0c8b5dbf110bc Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 22 Mar 2021 21:55:35 +0600 Subject: [PATCH 016/127] Distinguish invalid and not processed transition block --- specs/merge/fork-choice.md | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 150dba91c..523316dfc 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -9,7 +9,8 @@ - [Introduction](#introduction) - [Helpers](#helpers) - - [`get_total_difficulty`](#get_total_difficulty) + - [`PowBlock`](#powblock) + - [`get_pow_block`](#get_pow_block) - [`is_valid_transition_block`](#is_valid_transition_block) - [Updated fork-choice handlers](#updated-fork-choice-handlers) - [`on_block`](#on_block) @@ -25,20 +26,29 @@ This is the modification of the fork choice according to the executable beacon c ### Helpers -#### `get_total_difficulty` +#### `PowBlock` -Let `get_total_difficulty(hash: Bytes32) -> uint256` be the function that returns the total difficulty of the PoW block specified by its hash. +```python +class PowBlock(Container): + is_processed: boolean + is_valid: boolean + total_difficulty: uint256 +``` -*Note*: The function returns `0` if the block is either not yet processed or considered invalid. The latter two cases are considered indistinguishable to the current implementation of JSON-RPC. 
+#### `get_pow_block` + +Let `get_pow_block(hash: Bytes32) -> PowBlock` be the function that given the hash of the PoW block returns its data. + +*Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that hasn't been processed yet. Either extending of existing method or implementing a new one is required. #### `is_valid_transition_block` Used by fork-choice handler, `on_block` ```python -def is_valid_transition_block(block: BeaconBlock) -> boolean: - total_difficulty = get_total_difficulty(block.body.application_payload.block_hash) - return total_difficulty >= TRANSITION_TOTAL_DIFFICULTY +def is_valid_transition_block(block: PowBlock) -> boolean: + is_total_difficulty_reached = block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY + return block.is_processed and block.is_valid and is_total_difficulty_reached ``` ### Updated fork-choice handlers @@ -65,7 +75,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # [Added in Merge] Consider delaying the beacon block processing until PoW block is accepted by the application node if is_transition_block(pre_state, block.body): - assert is_valid_transition_block(block) + pow_block = get_pow_block(block.body.application_payload.block_hash) + assert is_valid_transition_block(pow_block) # Check the block is valid and compute the post-state state = pre_state.copy() From ea5f606bd095ad1c569629df1ce2e57b1048ae30 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 24 Mar 2021 16:30:29 +0600 Subject: [PATCH 017/127] Address various cleanups and formatting suggestions --- specs/merge/beacon-chain.md | 32 +++++++++++++++----------------- specs/merge/fork-choice.md | 6 +++--- specs/merge/validator.md | 29 ++++++++++++++++------------- 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index ca35679ed..c343c2ec9 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -1,6 
+1,6 @@ # Ethereum 2.0 The Merge -**Warning:** This document is based on [Phase 0](../phase0/beacon-chain.md) and considered to be rebased to [Altair](../altair/beacon-chain.md) once the latter is shipped. +**Warning:** This document is currently based on [Phase 0](../phase0/beacon-chain.md) but will be rebased to [Altair](../altair/beacon-chain.md) once the latter is shipped. **Notice**: This document is a work-in-progress for researchers and implementers. @@ -36,23 +36,24 @@ ## Introduction -This is a patch implementing executable beacon chain proposal. -It enshrines application execution and validity as a first class citizen at the core of the beacon chain. +This is a patch implementing the executable beacon chain proposal. +It enshrines application-layer execution and validity as a first class citizen at the core of the beacon chain. ## Constants ### Transition + | Name | Value | | - | - | -| `TRANSITION_TOTAL_DIFFICULTY` | _TBD_ | +| `TRANSITION_TOTAL_DIFFICULTY` | **TBD** | ### Execution | Name | Value | | - | - | -| `MAX_BYTES_PER_TRANSACTION_PAYLOAD` | `2**20` | -| `MAX_APPLICATION_TRANSACTIONS` | `2**14` | -| `BYTES_PER_LOGS_BLOOM` | `2**8` | +| `MAX_BYTES_PER_TRANSACTION_PAYLOAD` | `uint64(2**20)` (= 1,048,576) | +| `MAX_APPLICATION_TRANSACTIONS` | `uint64(2**14)` (= 16,384) | +| `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) | ## Containers @@ -73,15 +74,13 @@ class BeaconBlockBody(phase0.BeaconBlockBody): #### `BeaconState` -*Note*: `BeaconState` fields remain unchanged other than addition of `application_state_root` and `application_block_hash`. - +*Note*: `BeaconState` fields remain unchanged other than addition of `application_state_root` and `application_block_hash`. 
```python class BeaconState(phase0.BeaconState): - # [Added in Merge] hash of the root of application state - application_state_root: Bytes32 - # [Added in Merge] hash of recent application block - application_block_hash: Bytes32 + # Application-layer + application_state_root: Bytes32 # [New in Merge] + application_block_hash: Bytes32 # [New in Merge] ``` ### New containers @@ -128,7 +127,7 @@ class ApplicationPayload(Container): ```python def is_transition_completed(state: BeaconState) -> boolean: - state.application_block_hash != Bytes32() + return state.application_block_hash != Bytes32() ``` #### `is_transition_block` @@ -170,8 +169,7 @@ The body of the function is implementation dependent. ```python def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> None: """ - Note: This function is designed to be able to be run in parallel with - the other `process_block` sub-functions + Note: This function is designed to be able to be run in parallel with the other `process_block` sub-functions """ if is_transition_completed(state): @@ -182,7 +180,7 @@ def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> No state.application_block_hash = body.application_payload.block_hash elif is_transition_block(state, body): - assert body.application_payload == ApplicationPayload(block_hash = body.application_payload.block_hash) + assert body.application_payload == ApplicationPayload(block_hash=body.application_payload.block_hash) state.application_block_hash = body.application_payload.block_hash else: diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 523316dfc..25f83dbf6 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -39,11 +39,11 @@ class PowBlock(Container): Let `get_pow_block(hash: Bytes32) -> PowBlock` be the function that given the hash of the PoW block returns its data. 
-*Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that hasn't been processed yet. Either extending of existing method or implementing a new one is required. +*Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that haven't been processed yet. Either extending this existing method or implementing a new one is required. #### `is_valid_transition_block` -Used by fork-choice handler, `on_block` +Used by fork-choice handler, `on_block`. ```python def is_valid_transition_block(block: PowBlock) -> boolean: @@ -73,7 +73,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check block is a descendant of the finalized block at the checkpoint finalized slot assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root - # [Added in Merge] Consider delaying the beacon block processing until PoW block is accepted by the application node + # [New in Merge] Consider delaying the beacon block processing until PoW block is accepted by the application node if is_transition_block(pre_state, block.body): pow_block = get_pow_block(block.body.application_payload.block_hash) assert is_valid_transition_block(pow_block) diff --git a/specs/merge/validator.md b/specs/merge/validator.md index d55f2ea3e..4f4c28b3d 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -1,6 +1,6 @@ # Ethereum 2.0 The Merge -**Warning:** This document is based on [Phase 0](../phase0/validator.md) and considered to be rebased to [Altair](../altair/validator.md) once the latter is shipped. +**Warning:** This document is currently based on [Phase 0](../phase0/validator.md) but will be rebased to [Altair](../altair/validator.md) once the latter is shipped. **Notice**: This document is a work-in-progress for researchers and implementers. 
@@ -16,7 +16,7 @@ - [Block proposal](#block-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - [Application Payload](#application-payload) - - [`ApplicaitonBlock`](#applicaitonblock) + - [`PowBlock`](#powblock) - [`get_pow_chain_head`](#get_pow_chain_head) - [`produce_application_payload`](#produce_application_payload) @@ -43,7 +43,7 @@ All validator responsibilities remain unchanged other than those noted below. Na ##### Application Payload -###### `ApplicaitonBlock` +###### `PowBlock` ```python class PowBlock(Container): block_hash: Bytes32 @@ -59,17 +59,20 @@ Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of Let `produce_application_payload(parent_hash: Bytes32) -> ApplicationPayload` be the function that produces new instance of application payload. The body of this function is implementation dependent. -* Set `block.body.application_payload = get_application_payload(state, randao_reveal)` where: +* Set `block.body.application_payload = get_application_payload(state)` where: ```python def get_application_payload(state: BeaconState) -> ApplicationPayload: - if is_transition_completed(state): - application_parent_hash = state.application_block_hash - return produce_application_payload(application_parent_hash) - - pow_block = get_pow_chain_head() - if pow_block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY: - return ApplicationPayload(block_hash = pow_block.block_hash) - else: - return ApplicationPayload() + if not is_transition_completed(state): + pow_block = get_pow_chain_head() + if pow_block.total_difficulty < TRANSITION_TOTAL_DIFFICULTY: + # Pre-merge, empty payload + return ApplicationPayload() + else: + # Signify merge via last PoW block_hash and an otherwise empty payload + return ApplicationPayload(block_hash=pow_block.block_hash) + + # Post-merge, normal payload + application_parent_hash = state.application_block_hash + return produce_application_payload(state.application_block_hash) ``` 
From 63ae9f2bdbe7b54f5a188e6c64facb0fc5563c82 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 24 Mar 2021 20:58:31 +0600 Subject: [PATCH 018/127] Standardise PowBlock between fork-choice and validator --- specs/merge/fork-choice.md | 1 + specs/merge/validator.md | 8 -------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 25f83dbf6..c022de4ab 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -30,6 +30,7 @@ This is the modification of the fork choice according to the executable beacon c ```python class PowBlock(Container): + block_hash: Bytes32 is_processed: boolean is_valid: boolean total_difficulty: uint256 diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 4f4c28b3d..110a2a883 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -16,7 +16,6 @@ - [Block proposal](#block-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - [Application Payload](#application-payload) - - [`PowBlock`](#powblock) - [`get_pow_chain_head`](#get_pow_chain_head) - [`produce_application_payload`](#produce_application_payload) @@ -43,13 +42,6 @@ All validator responsibilities remain unchanged other than those noted below. Na ##### Application Payload -###### `PowBlock` -```python -class PowBlock(Container): - block_hash: Bytes32 - total_difficulty: uint256 -``` - ###### `get_pow_chain_head` Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific. 
From 6c0a0ab3f214872f8ac860e219bd0523fe416ae0 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 24 Mar 2021 12:36:25 -0600 Subject: [PATCH 019/127] patch test_filtered_block_tree test generator based on @ajsutton report --- .../pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index 318db496a..c598a3a7f 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -211,7 +211,7 @@ def test_filtered_block_tree(spec, state): test_steps.append({ 'checks': { 'head': get_formatted_head_output(spec, store), - 'justified_checkpoint_root': encode_hex(store.justified_checkpoint.hash_tree_root()), + 'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root), } }) From ee5ecf8e2b1980daecde0732ab5e7ceb6fd7cd4c Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 25 Mar 2021 17:49:13 +0600 Subject: [PATCH 020/127] Address a new portion of comments and fixes --- specs/merge/beacon-chain.md | 11 ++++------- specs/merge/fork-choice.md | 6 ++++-- specs/merge/validator.md | 14 +++++++------- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index c343c2ec9..b8362771e 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -69,7 +69,7 @@ order and append any additional fields to the end. 
```python class BeaconBlockBody(phase0.BeaconBlockBody): - application_payload: ApplicationPayload # [Added in Merge] application payload + application_payload: ApplicationPayload # [New in Merge] application payload ``` #### `BeaconState` @@ -79,8 +79,8 @@ class BeaconBlockBody(phase0.BeaconBlockBody): ```python class BeaconState(phase0.BeaconState): # Application-layer - application_state_root: Bytes32 # [New in Merge] - application_block_hash: Bytes32 # [New in Merge] + application_state_root: Bytes32 # [New in Merge] + application_block_hash: Bytes32 # [New in Merge] ``` ### New containers @@ -96,7 +96,7 @@ class Transaction(Container): gas_limit: uint64 recipient: Bytes20 value: uint256 - input: List[Bytes1, MAX_BYTES_PER_TRANSACTION_PAYLOAD] + data: List[byte, MAX_BYTES_PER_TRANSACTION_PAYLOAD] v: uint256 r: uint256 s: uint256 @@ -115,7 +115,6 @@ class ApplicationPayload(Container): gas_used: uint64 receipt_root: Bytes32 logs_bloom: Vector[Bytes1, BYTES_PER_LOGS_BLOOM] - difficulty: uint64 # Temporary field, will be removed later on transactions: List[Transaction, MAX_APPLICATION_TRANSACTIONS] ``` @@ -178,11 +177,9 @@ def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> No state.application_state_root = body.application_payload.state_root state.application_block_hash = body.application_payload.block_hash - elif is_transition_block(state, body): assert body.application_payload == ApplicationPayload(block_hash=body.application_payload.block_hash) state.application_block_hash = body.application_payload.block_hash - else: assert body.application_payload == ApplicationPayload() ``` diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index c022de4ab..d299a8247 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -49,7 +49,7 @@ Used by fork-choice handler, `on_block`. 
```python def is_valid_transition_block(block: PowBlock) -> boolean: is_total_difficulty_reached = block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY - return block.is_processed and block.is_valid and is_total_difficulty_reached + return block.is_valid and is_total_difficulty_reached ``` ### Updated fork-choice handlers @@ -74,9 +74,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check block is a descendant of the finalized block at the checkpoint finalized slot assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root - # [New in Merge] Consider delaying the beacon block processing until PoW block is accepted by the application node + # [New in Merge] if is_transition_block(pre_state, block.body): pow_block = get_pow_block(block.body.application_payload.block_hash) + # Delay consideration of block until PoW block is processed by the PoW node + assert pow_block.is_processed assert is_valid_transition_block(pow_block) # Check the block is valid and compute the post-state diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 110a2a883..49f7f6137 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -56,13 +56,13 @@ The body of this function is implementation dependent. 
```python def get_application_payload(state: BeaconState) -> ApplicationPayload: if not is_transition_completed(state): - pow_block = get_pow_chain_head() - if pow_block.total_difficulty < TRANSITION_TOTAL_DIFFICULTY: - # Pre-merge, empty payload - return ApplicationPayload() - else: - # Signify merge via last PoW block_hash and an otherwise empty payload - return ApplicationPayload(block_hash=pow_block.block_hash) + pow_block = get_pow_chain_head() + if pow_block.total_difficulty < TRANSITION_TOTAL_DIFFICULTY: + # Pre-merge, empty payload + return ApplicationPayload() + else: + # Signify merge via last PoW block_hash and an otherwise empty payload + return ApplicationPayload(block_hash=pow_block.block_hash) # Post-merge, normal payload application_parent_hash = state.application_block_hash From a23bde347be4184877a22788d81f32d541ce3202 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 25 Mar 2021 17:51:32 +0600 Subject: [PATCH 021/127] Bytes1 to byte in ApplicationPayload.logs_bloom --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index b8362771e..0d434db4f 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -114,7 +114,7 @@ class ApplicationPayload(Container): gas_limit: uint64 gas_used: uint64 receipt_root: Bytes32 - logs_bloom: Vector[Bytes1, BYTES_PER_LOGS_BLOOM] + logs_bloom: Vector[byte, BYTES_PER_LOGS_BLOOM] transactions: List[Transaction, MAX_APPLICATION_TRANSACTIONS] ``` From 260a0a527378505a333abbcab083502fe93d5e4b Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 25 Mar 2021 17:53:15 +0600 Subject: [PATCH 022/127] Polish merge/fork-choice.md --- specs/merge/fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index d299a8247..430128c12 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -76,8 +76,8 @@ 
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # [New in Merge] if is_transition_block(pre_state, block.body): - pow_block = get_pow_block(block.body.application_payload.block_hash) # Delay consideration of block until PoW block is processed by the PoW node + pow_block = get_pow_block(block.body.application_payload.block_hash) assert pow_block.is_processed assert is_valid_transition_block(pow_block) From 81a2c2c2b5b8536adfaddd8ff42f7372a1122b4f Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 25 Mar 2021 18:41:00 +0600 Subject: [PATCH 023/127] Use ByteList[N] and ByteVector[N] types --- specs/merge/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 0d434db4f..694df161b 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -96,7 +96,7 @@ class Transaction(Container): gas_limit: uint64 recipient: Bytes20 value: uint256 - data: List[byte, MAX_BYTES_PER_TRANSACTION_PAYLOAD] + data: ByteList[MAX_BYTES_PER_TRANSACTION_PAYLOAD] v: uint256 r: uint256 s: uint256 @@ -114,7 +114,7 @@ class ApplicationPayload(Container): gas_limit: uint64 gas_used: uint64 receipt_root: Bytes32 - logs_bloom: Vector[byte, BYTES_PER_LOGS_BLOOM] + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] transactions: List[Transaction, MAX_APPLICATION_TRANSACTIONS] ``` From 020895e35d03ae8615707039bd4a46ed398d9286 Mon Sep 17 00:00:00 2001 From: Alex Vlasov Date: Fri, 26 Mar 2021 00:03:21 +0300 Subject: [PATCH 024/127] Typing problems fixes (#2271) * Typing problem fixed: `process_block_header` passes `Bytes32()` to `state_root` of `BeaconBlockHeader`, which type is `Root` * Typing problem fixed in `initialize_beacon_state_from_eth1`: `len` returns an `int` value, while `deposit_count=uint64` of `Eth1Data` has type `uint64` * Typing problem fixed in `process_rewards_and_penalties`: `numerator` of type `int` passed to `weight` parameter of 
`get_flag_index_deltas`, which has type `uint64` * Typing problem fixed in `process_attestation`; `False` passes as `crosslink_success` parameter of `PendingAttestation`, which has type `boolean`. `False` is an instance of `(python.)bool` and is not an instance of `(ssz.)boolean` * Typing problem fixed: `shard_data_roots` of `ShardTransition` has type `List[Bytes32]`, but its elements are used as if they were `Root` values, e.g. in `process_chunk_challenge` method: passed to `data_root` of `CustodyChunkChallengeRecord` which has type `Root` * Typing problem fixed in `process_custody_final_updates`: `index` has type `int`, while `validator_indices_in_records` has type `Set[ValidatorIndex]`, so tesing whether `index in validator_indices_in_records` can be risky, depending on implementation details. `ValidatorIndex(index) in validator_indices_in_records` is a safer variant. * Typing problem fixed: `slashed` parameter of `pack_compact_validator` has type `(python.)bool`, however in `committee_to_compact_committee` a value of `(ssz.)boolean` is passed as a value of the parameter * Typing problem fixed: `inactivity_scores` is a `List[uint64,...]`, while it is intialized/appended with values of `(python.)int` type * fixed according to @protolambda suggestions * changed types of _WEIGHT constants and appropriate variables/parameters, according to @protolambda suggestions * revert code formatting back * Introduced ZERO_ROOT according to @protolambda 's suggestion * Reverted back to , according to @protolambda comments --- specs/altair/beacon-chain.md | 14 +++++++------- specs/altair/fork.md | 2 +- specs/phase0/beacon-chain.md | 2 +- specs/phase1/beacon-chain.md | 8 ++++---- specs/phase1/custody-game.md | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 81e7af14b..1ce78ad7c 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -84,11 +84,11 @@ Altair is the first 
beacon chain hard fork. Its main features are: | Name | Value | | - | - | -| `TIMELY_HEAD_WEIGHT` | `12` | -| `TIMELY_SOURCE_WEIGHT` | `12` | -| `TIMELY_TARGET_WEIGHT` | `24` | -| `SYNC_REWARD_WEIGHT` | `8` | -| `WEIGHT_DENOMINATOR` | `64` | +| `TIMELY_HEAD_WEIGHT` | `uint64(12)` | +| `TIMELY_SOURCE_WEIGHT` | `uint64(12)` | +| `TIMELY_TARGET_WEIGHT` | `uint64(24)` | +| `SYNC_REWARD_WEIGHT` | `uint64(8)` | +| `WEIGHT_DENOMINATOR` | `uint64(64)` | *Note*: The sum of the weight fractions (7/8) plus the proposer inclusion fraction (1/8) equals 1. @@ -234,7 +234,7 @@ def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, s #### `get_flag_indices_and_weights` ```python -def get_flag_indices_and_weights() -> Sequence[Tuple[int, int]]: +def get_flag_indices_and_weights() -> Sequence[Tuple[int, uint64]]: return ( (TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_WEIGHT), (TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_WEIGHT), @@ -517,7 +517,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: state.balances.append(amount) state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.inactivity_scores.append(0) + state.inactivity_scores.append(uint64(0)) else: # Increase balance by deposit amount index = ValidatorIndex(validator_pubkeys.index(pubkey)) diff --git a/specs/altair/fork.md b/specs/altair/fork.md index 1fb87d554..df85f7c3c 100644 --- a/specs/altair/fork.md +++ b/specs/altair/fork.md @@ -76,7 +76,7 @@ def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState: current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, # Inactivity - inactivity_scores=[0 for _ in range(len(pre.validators))], + inactivity_scores=[uint64(0) for _ in range(len(pre.validators))], ) # Fill in sync committees post.current_sync_committee = get_sync_committee(post, get_current_epoch(post)) diff --git 
a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index e56a8ea22..e2a17dcfc 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -1159,7 +1159,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32, state = BeaconState( genesis_time=eth1_timestamp + GENESIS_DELAY, fork=fork, - eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)), + eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy ) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 21e9751fe..5acc496d6 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -393,7 +393,7 @@ class ShardTransition(Container): shard_block_lengths: List[uint64, MAX_SHARD_BLOCKS_PER_ATTESTATION] # Shard data roots # The root is of ByteList[MAX_SHARD_BLOCK_SIZE] - shard_data_roots: List[Bytes32, MAX_SHARD_BLOCKS_PER_ATTESTATION] + shard_data_roots: List[Root, MAX_SHARD_BLOCKS_PER_ATTESTATION] # Intermediate shard states shard_states: List[ShardState, MAX_SHARD_BLOCKS_PER_ATTESTATION] # Proposer signature aggregate @@ -431,7 +431,7 @@ def pack_compact_validator(index: ValidatorIndex, slashed: bool, balance_in_incr Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with the unpacking function. 
""" - return (index << 16) + (slashed << 15) + balance_in_increments + return (index << 16) + (uint64(slashed) << 15) + balance_in_increments ``` #### `unpack_compact_validator` @@ -457,7 +457,7 @@ def committee_to_compact_committee(state: BeaconState, committee: Sequence[Valid """ validators = [state.validators[i] for i in committee] compact_validators = [ - pack_compact_validator(i, v.slashed, v.effective_balance // EFFECTIVE_BALANCE_INCREMENT) + pack_compact_validator(i, bool(v.slashed), v.effective_balance // EFFECTIVE_BALANCE_INCREMENT) for i, v in zip(committee, validators) ] pubkeys = [v.pubkey for v in validators] @@ -807,7 +807,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: data=attestation.data, inclusion_delay=state.slot - attestation.data.slot, proposer_index=get_beacon_proposer_index(state), - crosslink_success=False, # To be filled in during process_shard_transitions + crosslink_success=boolean(False), # To be filled in during process_shard_transitions ) if attestation.data.target.epoch == get_current_epoch(state): state.current_epoch_attestations.append(pending_attestation) diff --git a/specs/phase1/custody-game.md b/specs/phase1/custody-game.md index 33c2a0397..136116c2f 100644 --- a/specs/phase1/custody-game.md +++ b/specs/phase1/custody-game.md @@ -583,7 +583,7 @@ def process_custody_final_updates(state: BeaconState) -> None: for index, validator in enumerate(state.validators): if validator.exit_epoch != FAR_FUTURE_EPOCH: not_all_secrets_are_revealed = validator.all_custody_secrets_revealed_epoch == FAR_FUTURE_EPOCH - if index in validator_indices_in_records or not_all_secrets_are_revealed: + if ValidatorIndex(index) in validator_indices_in_records or not_all_secrets_are_revealed: # Delay withdrawable epochs if challenge records are not empty or not all # custody secrets revealed validator.withdrawable_epoch = FAR_FUTURE_EPOCH From f9ba7f616e2f8114d2abdf25ef0777bb0dc84532 Mon Sep 17 00:00:00 2001 From: Alex 
Stokes Date: Fri, 26 Mar 2021 11:38:47 -0700 Subject: [PATCH 025/127] Fix typo from earlier edit to field name The field name `aggregation_bits` was changed from `aggregate_bits` to reflect the same usage under the attestation scheme. This PR cleans up left-over references to the old field name in the section header and TOC. --- specs/altair/validator.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 25897b97e..4bca19476 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -39,7 +39,7 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./ - [Slot](#slot) - [Beacon block root](#beacon-block-root) - [Subcommittee index](#subcommittee-index) - - [Aggregate bits](#aggregate-bits) + - [Aggregation bits](#aggregation-bits) - [Signature](#signature) - [Broadcast sync committee contribution](#broadcast-sync-committee-contribution) - [Sync committee subnet stability](#sync-committee-subnet-stability) @@ -350,7 +350,7 @@ Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_ Set `contribution.subcommittee_index` to the index for the subcommittee index corresponding to the subcommittee assigned to this subnet. This index matches the `subnet_id` used to derive the topic name. -###### Aggregate bits +###### Aggregation bits Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee. An aggregator needs to find the index in the sync committee (as returned by `get_sync_committee_indices`) for a given validator referenced by `sync_committee_signature.validator_index` and map the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). 
This index within the subcommittee is the one set in the `Bitvector`. @@ -419,4 +419,4 @@ Some early sync committee rewards may be missed while the initial subnets form. * To join a sync committee subnet, select a random number of epochs before the end of the current sync committee period between 1 and `SYNC_COMMITTEE_SUBNET_COUNT`, inclusive. Validators should join their member subnet at the beginning of the epoch they have randomly selected. For example, if the next sync committee period starts at epoch `853,248` and the validator randomly selects an offset of `3`, they should join the subnet at the beginning of epoch `853,245`. -Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period. \ No newline at end of file +Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period. From 875aac4c439502fed0967fa8bf4183c7d0ab9e6e Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 26 Mar 2021 11:50:48 -0700 Subject: [PATCH 026/127] Fix aggregation bits example in Altair validator guide The sub-index calculation was incorrect when demonstrating how to map a validator in the sync committee to a particular bit in the `aggregation_bits` in the sync committee aggregation scheme. The correct index is given in this commit. 
--- specs/altair/validator.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 25897b97e..173a1e713 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -354,7 +354,7 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee. An aggregator needs to find the index in the sync committee (as returned by `get_sync_committee_indices`) for a given validator referenced by `sync_committee_signature.validator_index` and map the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is the one set in the `Bitvector`. -For example, a validator with index `2044` could be at index `15` in the current sync committee. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution. +For example, a validator with index `2044` could be at index `135` in the current sync committee. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution. Also note that a validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`. ###### Signature @@ -419,4 +419,4 @@ Some early sync committee rewards may be missed while the initial subnets form. * To join a sync committee subnet, select a random number of epochs before the end of the current sync committee period between 1 and `SYNC_COMMITTEE_SUBNET_COUNT`, inclusive. Validators should join their member subnet at the beginning of the epoch they have randomly selected. 
For example, if the next sync committee period starts at epoch `853,248` and the validator randomly selects an offset of `3`, they should join the subnet at the beginning of epoch `853,245`. -Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period. \ No newline at end of file +Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period. From ccfbf5c70a67f1af76d08a432be4b212f2109edf Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 26 Mar 2021 11:29:01 -0700 Subject: [PATCH 027/127] Fix use of block root in Altair validator guide --- specs/altair/validator.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 25897b97e..764c89e37 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -266,15 +266,16 @@ If a validator is in the current sync committee (i.e. `is_assigned_to_sync_commi This logic is triggered upon the same conditions as when producing an attestation. Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first. -`get_sync_committee_signature` assumes `state` is the head state corresponding to processing the block at the current slot as determined by the fork choice (including any empty slots processed with `process_slots`), `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator. 
+`get_sync_committee_signature` assumes `state` is the head state corresponding to processing the block at the current slot as determined by the fork choice (including any empty slots processed with `process_slots`), `block_root` is the root of the head block whose processing results in `state`, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator. ```python def get_sync_committee_signature(state: BeaconState, + block_root: Root, validator_index: ValidatorIndex, privkey: int) -> SyncCommitteeSignature: epoch = get_current_epoch(state) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch) - signing_root = compute_signing_root(get_block_root_at_slot(state, state.slot), domain) + signing_root = compute_signing_root(block_root, domain) signature = bls.Sign(privkey, signing_root) return SyncCommitteeSignature(slot=state.slot, validator_index=validator_index, signature=signature) From 41a087a78dd4ff88a2222b709843bf4472b9f3c9 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 26 Mar 2021 13:12:53 -0600 Subject: [PATCH 028/127] minor edits from code review Co-authored-by: terence tsao --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 694df161b..9ddd448a4 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -87,7 +87,7 @@ class BeaconState(phase0.BeaconState): #### `Transaction` -Application transaction fields structured as an SSZ object for inclusion in an `ApplicationPayload` contained within a `BeaconBlock`. +Application transaction fields structured as an SSZ object for inclusion in an `ApplicationPayload` contained within a `BeaconBlockBody`. 
```python class Transaction(Container): From 223aba3e782aa5aee755c784ab7a79b09995fc69 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 26 Mar 2021 13:20:23 -0600 Subject: [PATCH 029/127] byte-list for opaque transaction payload --- specs/merge/beacon-chain.md | 32 +++++++++++--------------------- 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 9ddd448a4..ce6df0dd9 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -39,6 +39,14 @@ This is a patch implementing the executable beacon chain proposal. It enshrines application-layer execution and validity as a first class citizen at the core of the beacon chain. +## Custom types + +We define the following Python custom types for type hinting and readability: + +| Name | SSZ equivalent | Description | +| - | - | - | +| `OpaqueTransaction` | `ByteList[MAX_BYTES_PER_OPAQUE_TRANSACTION]` | a byte-list containing a single [typed transaction envelope](https://eips.ethereum.org/EIPS/eip-2718#opaque-byte-array-rather-than-an-rlp-array) structured as `TransactionType \|\| TransactionPayload` | + ## Constants ### Transition @@ -51,11 +59,10 @@ It enshrines application-layer execution and validity as a first class citizen a | Name | Value | | - | - | -| `MAX_BYTES_PER_TRANSACTION_PAYLOAD` | `uint64(2**20)` (= 1,048,576) | +| `MAX_BYTES_PER_OPAQUE_TRANSACTION` | `uint64(2**20)` (= 1,048,576) | | `MAX_APPLICATION_TRANSACTIONS` | `uint64(2**14)` (= 16,384) | | `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) | - ## Containers ### Extended containers @@ -85,26 +92,9 @@ class BeaconState(phase0.BeaconState): ### New containers -#### `Transaction` - -Application transaction fields structured as an SSZ object for inclusion in an `ApplicationPayload` contained within a `BeaconBlockBody`. 
- -```python -class Transaction(Container): - nonce: uint64 - gas_price: uint256 - gas_limit: uint64 - recipient: Bytes20 - value: uint256 - data: ByteList[MAX_BYTES_PER_TRANSACTION_PAYLOAD] - v: uint256 - r: uint256 - s: uint256 -``` - #### `ApplicationPayload` -The application payload included in a `BeaconBlock`. +The application payload included in a `BeaconBlockBody`. ```python class ApplicationPayload(Container): @@ -115,7 +105,7 @@ class ApplicationPayload(Container): gas_used: uint64 receipt_root: Bytes32 logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] - transactions: List[Transaction, MAX_APPLICATION_TRANSACTIONS] + transactions: List[OpaqueTransaction, MAX_APPLICATION_TRANSACTIONS] ``` ## Helper functions From e17ab8e8de048f7cfba77a520fe03bc31b59297f Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 9 Dec 2020 15:29:21 +0800 Subject: [PATCH 030/127] DAS phase 1 --- specs/phase1/beacon-chain.md | 1035 +++++++++------------------------- 1 file changed, 262 insertions(+), 773 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 5acc496d6..e9e252824 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -10,81 +10,15 @@ - [Introduction](#introduction) - [Custom types](#custom-types) - [Configuration](#configuration) - - [Misc](#misc) - - [Shard block configs](#shard-block-configs) - - [Gwei values](#gwei-values) - - [Initial values](#initial-values) - - [Time parameters](#time-parameters) - - [Domain types](#domain-types) - [Updated containers](#updated-containers) - - [Extended `AttestationData`](#extended-attestationdata) - - [Extended `Attestation`](#extended-attestation) - - [Extended `PendingAttestation`](#extended-pendingattestation) - - [Extended `IndexedAttestation`](#extended-indexedattestation) - - [Extended `AttesterSlashing`](#extended-attesterslashing) - - [Extended `Validator`](#extended-validator) - - [Extended `BeaconBlockBody`](#extended-beaconblockbody) - - [Extended 
`BeaconBlock`](#extended-beaconblock) - - [Extended `SignedBeaconBlock`](#extended-signedbeaconblock) - - [Extended `BeaconState`](#extended-beaconstate) - [New containers](#new-containers) - - [`ShardBlock`](#shardblock) - - [`SignedShardBlock`](#signedshardblock) - - [`ShardBlockHeader`](#shardblockheader) - - [`ShardState`](#shardstate) - - [`ShardTransition`](#shardtransition) - - [`CompactCommittee`](#compactcommittee) - [Helper functions](#helper-functions) - - [Misc](#misc-1) - - [`compute_previous_slot`](#compute_previous_slot) - - [`pack_compact_validator`](#pack_compact_validator) - - [`unpack_compact_validator`](#unpack_compact_validator) - - [`committee_to_compact_committee`](#committee_to_compact_committee) - - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index) - - [`compute_offset_slots`](#compute_offset_slots) - - [`compute_updated_gasprice`](#compute_updated_gasprice) - - [`compute_committee_source_epoch`](#compute_committee_source_epoch) - - [Beacon state accessors](#beacon-state-accessors) - - [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot) - - [`get_active_shard_count`](#get_active_shard_count) - - [`get_online_validator_indices`](#get_online_validator_indices) - - [`get_shard_committee`](#get_shard_committee) - - [`get_light_client_committee`](#get_light_client_committee) - - [`get_shard_proposer_index`](#get_shard_proposer_index) - - [`get_committee_count_delta`](#get_committee_count_delta) - - [`get_start_shard`](#get_start_shard) - - [`get_latest_slot_for_shard`](#get_latest_slot_for_shard) - - [`get_offset_slots`](#get_offset_slots) - - [Predicates](#predicates) - - [`is_on_time_attestation`](#is_on_time_attestation) - - [`is_winning_attestation`](#is_winning_attestation) - - [`optional_aggregate_verify`](#optional_aggregate_verify) - - [`optional_fast_aggregate_verify`](#optional_fast_aggregate_verify) - - [Block processing](#block-processing) - - [Operations](#operations) - - [New 
Attestation processing](#new-attestation-processing) - - [`validate_attestation`](#validate_attestation) - - [Updated `process_attestation`](#updated-process_attestation) - - [Shard transition processing](#shard-transition-processing) - - [`apply_shard_transition`](#apply_shard_transition) - - [`process_crosslink_for_shard`](#process_crosslink_for_shard) - - [`process_crosslinks`](#process_crosslinks) - - [`verify_empty_shard_transition`](#verify_empty_shard_transition) - - [`process_shard_transitions`](#process_shard_transitions) - - [New default validator for deposits](#new-default-validator-for-deposits) - - [Light client processing](#light-client-processing) - - [Epoch transition](#epoch-transition) - - [Phase 1 final updates](#phase-1-final-updates) - - [Custody game updates](#custody-game-updates) - - [Online-tracking](#online-tracking) - - [Light client committee updates](#light-client-committee-updates) ## Introduction -This document describes the extensions made to the Phase 0 design of The Beacon Chain - to facilitate the new shards as part of Phase 1 of Eth2. +This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding. ## Custom types @@ -93,7 +27,6 @@ We define the following Python custom types for type hinting and readability: | Name | SSZ equivalent | Description | | - | - | - | | `Shard` | `uint64` | a shard number | -| `OnlineEpochs` | `uint8` | online countdown epochs | ## Configuration @@ -106,56 +39,48 @@ Configuration is not namespaced. 
Instead it is strictly an extension;
 
 | - | - |
 | `MAX_SHARDS` | `uint64(2**10)` (= 1024) |
 | `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) |
-| `LIGHT_CLIENT_COMMITTEE_SIZE` | `uint64(2**7)` (= 128) |
-| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) |
+| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) |
 
 ### Shard block configs
 
-| Name | Value | Unit |
+| Name | Value | Notes |
 | - | - | - |
-| `MAX_SHARD_BLOCK_SIZE` | `uint64(2**20)` (= 1,048,576) | bytes |
-| `TARGET_SHARD_BLOCK_SIZE` | `uint64(2**18)` (= 262,144) | bytes |
-| `SHARD_BLOCK_OFFSETS` | `List[uint64, 12]([1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233])` | - |
-| `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | - |
-| `BYTES_PER_CUSTODY_CHUNK` | `uint64(2**12)` (= 4,096) | bytes |
-| `CUSTODY_RESPONSE_DEPTH` | `ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)` | - |
+| `POINTS_PER_SAMPLE` | `uint64(2**3)` (= 8) | 31 * 8 = 248 bytes |
+| `MAX_SAMPLES_PER_BLOCK` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes |
+| `TARGET_SAMPLES_PER_BLOCK` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes |
+
+### Precomputed size verification points
+
+| Name | Value |
+| - | - |
+| `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED |
+
+These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X - w ** revbit(a))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w` is the root of unity and `revbit` is the reverse-bit-order function.
### Gwei values
 
-| Name | Value |
-| - | - |
-| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei |
-| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei |
-
-### Initial values
-
-| Name | Value |
-| - | - |
-| `NO_SIGNATURE` | `BLSSignature(b'\x00' * 96)` |
+| Name | Value | Unit | Description |
+| - | - | - | - |
+| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | Max gasprice charged for a TARGET-sized shard block |
+| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for a TARGET-sized shard block |
 
 ### Time parameters
 
 | Name | Value | Unit | Duration |
 | - | - | :-: | :-: |
-| `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 mins |
-| `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
+| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
 
 ### Domain types
 
 | Name | Value |
 | - | - |
-| `DOMAIN_SHARD_PROPOSAL` | `DomainType('0x80000000')` |
-| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` |
-| `DOMAIN_LIGHT_CLIENT` | `DomainType('0x82000000')` |
-| `DOMAIN_CUSTODY_BIT_SLASHING` | `DomainType('0x83000000')` |
-| `DOMAIN_LIGHT_SELECTION_PROOF` | `DomainType('0x84000000')` |
-| `DOMAIN_LIGHT_AGGREGATE_AND_PROOF` | `DomainType('0x85000000')` |
+| `DOMAIN_SHARD_HEADER` | 0x40 |
 
 ## Updated containers
 
 The following containers have updated definitions in Phase 1.
-### Extended `AttestationData` +### `AttestationData` ```python class AttestationData(Container): @@ -166,246 +91,58 @@ class AttestationData(Container): # FFG vote source: Checkpoint target: Checkpoint - # Shard vote - shard: Shard - # Current-slot shard block root - shard_head_root: Root - # Shard transition root - shard_transition_root: Root + # Shard header root + shard_header_root: Root ``` -### Extended `Attestation` +### `BeaconState` ```python -class Attestation(Container): - aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] - data: AttestationData - signature: BLSSignature -``` - -### Extended `PendingAttestation` - -```python -class PendingAttestation(Container): - aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] - data: AttestationData - inclusion_delay: Slot - proposer_index: ValidatorIndex - # Phase 1 - crosslink_success: boolean -``` - -### Extended `IndexedAttestation` - -```python -class IndexedAttestation(Container): - attesting_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE] - data: AttestationData - signature: BLSSignature -``` - -### Extended `AttesterSlashing` - -Note that the `attestation_1` and `attestation_2` have a new `IndexedAttestation` definition. - -```python -class AttesterSlashing(Container): - attestation_1: IndexedAttestation - attestation_2: IndexedAttestation -``` - -### Extended `Validator` - -```python -class Validator(Container): - pubkey: BLSPubkey - withdrawal_credentials: Bytes32 # Commitment to pubkey for withdrawals - effective_balance: Gwei # Balance at stake - slashed: boolean - # Status epochs - activation_eligibility_epoch: Epoch # When criteria for activation were met - activation_epoch: Epoch - exit_epoch: Epoch - withdrawable_epoch: Epoch # When validator can withdraw funds - # Custody game - # next_custody_secret_to_reveal is initialised to the custody period - # (of the particular validator) in which the validator is activated - # = get_custody_period_for_validator(...) 
- next_custody_secret_to_reveal: uint64 - # TODO: The max_reveal_lateness doesn't really make sense anymore. - # So how do we incentivise early custody key reveals now? - all_custody_secrets_revealed_epoch: Epoch # to be initialized to FAR_FUTURE_EPOCH -``` - -### Extended `BeaconBlockBody` - -```python -class BeaconBlockBody(Container): - randao_reveal: BLSSignature - eth1_data: Eth1Data # Eth1 data vote - graffiti: Bytes32 # Arbitrary data - # Slashings - proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS] - attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS] - # Attesting - attestations: List[Attestation, MAX_ATTESTATIONS] - # Entry & exit - deposits: List[Deposit, MAX_DEPOSITS] - voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] - # Custody game - chunk_challenges: List[CustodyChunkChallenge, MAX_CUSTODY_CHUNK_CHALLENGES] - chunk_challenge_responses: List[CustodyChunkResponse, MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES] - custody_key_reveals: List[CustodyKeyReveal, MAX_CUSTODY_KEY_REVEALS] - early_derived_secret_reveals: List[EarlyDerivedSecretReveal, MAX_EARLY_DERIVED_SECRET_REVEALS] - custody_slashings: List[SignedCustodySlashing, MAX_CUSTODY_SLASHINGS] - # Shards - shard_transitions: Vector[ShardTransition, MAX_SHARDS] - # Light clients - light_client_bits: Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE] - light_client_signature: BLSSignature -``` - -### Extended `BeaconBlock` - -Note that the `body` has a new `BeaconBlockBody` definition. - -```python -class BeaconBlock(Container): - slot: Slot - proposer_index: ValidatorIndex - parent_root: Root - state_root: Root - body: BeaconBlockBody -``` - -#### Extended `SignedBeaconBlock` - -Note that the `message` has a new `BeaconBlock` definition. - -```python -class SignedBeaconBlock(Container): - message: BeaconBlock - signature: BLSSignature -``` - -### Extended `BeaconState` - -Note that aside from the new additions, `Validator` and `PendingAttestation` have new definitions. 
- -```python -class BeaconState(Container): - # Versioning - genesis_time: uint64 - genesis_validators_root: Root - slot: Slot - fork: Fork - # History - latest_block_header: BeaconBlockHeader - block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] - state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] - historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] - # Eth1 - eth1_data: Eth1Data - eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] - eth1_deposit_index: uint64 - # Registry - validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] - balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] - # Randomness - randao_mixes: Vector[Root, EPOCHS_PER_HISTORICAL_VECTOR] - # Slashings - slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances - # Attestations - previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] - current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] - # Finality - justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch - previous_justified_checkpoint: Checkpoint # Previous epoch snapshot - current_justified_checkpoint: Checkpoint - finalized_checkpoint: Checkpoint - # Phase 1 - current_epoch_start_shard: Shard - shard_states: List[ShardState, MAX_SHARDS] - online_countdown: List[OnlineEpochs, VALIDATOR_REGISTRY_LIMIT] # not a raw byte array, considered its large size. 
- current_light_committee: CompactCommittee - next_light_committee: CompactCommittee - # Custody game - # Future derived secrets already exposed; contains the indices of the exposed validator - # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS - exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH], - EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] - custody_chunk_challenge_records: List[CustodyChunkChallengeRecord, MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS] - custody_chunk_challenge_index: uint64 +class BeaconState(phase0.BeaconState): + current_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] + previous_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] + confirmed_header_root: Root + shard_gasprice: uint64 ``` ## New containers The following containers are new in Phase 1. -### `ShardBlock` +### `ShardHeader` ```python -class ShardBlock(Container): - shard_parent_root: Root - beacon_parent_root: Root +class ShardHeader(Container): + # Slot and shard that this header is intended for slot: Slot shard: Shard - proposer_index: ValidatorIndex - body: ByteList[MAX_SHARD_BLOCK_SIZE] + # Kate commitment to the data + commitment: BLSCommitment + # Length of the data in samples + length: uint64 + # Proof of the length (more precisely, proof that values at + # positions >= the length all equal zero) + length_proof: BLSCommitment ``` -### `SignedShardBlock` +### `PendingShardHeader` ```python -class SignedShardBlock(Container): - message: ShardBlock - signature: BLSSignature -``` - -### `ShardBlockHeader` - -```python -class ShardBlockHeader(Container): - shard_parent_root: Root - beacon_parent_root: Root - slot: Slot +class PendingShardHeader(Container): + # Slot and shard that this header is intended for + slot: uint64 shard: Shard - proposer_index: ValidatorIndex - body_root: Root -``` - -### `ShardState` - -```python -class 
ShardState(Container): - slot: Slot - gasprice: Gwei - latest_block_root: Root -``` - -### `ShardTransition` - -```python -class ShardTransition(Container): - # Starting from slot - start_slot: Slot - # Shard block lengths - shard_block_lengths: List[uint64, MAX_SHARD_BLOCKS_PER_ATTESTATION] - # Shard data roots - # The root is of ByteList[MAX_SHARD_BLOCK_SIZE] - shard_data_roots: List[Root, MAX_SHARD_BLOCKS_PER_ATTESTATION] - # Intermediate shard states - shard_states: List[ShardState, MAX_SHARD_BLOCKS_PER_ATTESTATION] - # Proposer signature aggregate - proposer_signature_aggregate: BLSSignature -``` - -### `CompactCommittee` - -```python -class CompactCommittee(Container): - pubkeys: List[BLSPubkey, MAX_VALIDATORS_PER_COMMITTEE] - compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE] + # Kate commitment to the data + commitment: BLSCommitment + # hash_tree_root of the ShardHeader (stored so that attestations + # can be checked against it) + root: Hash + # Length of the data in samples + length: uint64 + # Who voted for the header + votes: Bitlist[MAX_COMMITTEE_SIZE] + # Has this header been confirmed? + confirmed: bool ``` ## Helper functions @@ -422,48 +159,6 @@ def compute_previous_slot(slot: Slot) -> Slot: return Slot(0) ``` -#### `pack_compact_validator` - -```python -def pack_compact_validator(index: ValidatorIndex, slashed: bool, balance_in_increments: uint64) -> uint64: - """ - Create a compact validator object representing index, slashed status, and compressed balance. - Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with - the unpacking function. 
- """ - return (index << 16) + (uint64(slashed) << 15) + balance_in_increments -``` - -#### `unpack_compact_validator` - -```python -def unpack_compact_validator(compact_validator: uint64) -> Tuple[ValidatorIndex, bool, uint64]: - """ - Return validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT - """ - return ( - ValidatorIndex(compact_validator >> 16), - bool((compact_validator >> 15) % 2), - compact_validator & (2**15 - 1), - ) -``` - -#### `committee_to_compact_committee` - -```python -def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee: - """ - Given a state and a list of validator indices, outputs the ``CompactCommittee`` representing them. - """ - validators = [state.validators[i] for i in committee] - compact_validators = [ - pack_compact_validator(i, bool(v.slashed), v.effective_balance // EFFECTIVE_BALANCE_INCREMENT) - for i, v in zip(committee, validators) - ] - pubkeys = [v.pubkey for v in validators] - return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators) -``` - #### `compute_shard_from_committee_index` ```python @@ -472,27 +167,17 @@ def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex return Shard((index + get_start_shard(state, slot)) % active_shards) ``` -#### `compute_offset_slots` - -```python -def compute_offset_slots(start_slot: Slot, end_slot: Slot) -> Sequence[Slot]: - """ - Return the offset slots that are greater than ``start_slot`` and less than ``end_slot``. 
- """ - return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < end_slot] -``` - #### `compute_updated_gasprice` ```python def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64) -> Gwei: - if shard_block_length > TARGET_SHARD_BLOCK_SIZE: - delta = (prev_gasprice * (shard_block_length - TARGET_SHARD_BLOCK_SIZE) - // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT) + if shard_block_length > TARGET_SAMPLES_PER_BLOCK: + delta = (prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOCK) + // TARGET_SAMPLES_PER_BLOCK // GASPRICE_ADJUSTMENT_COEFFICIENT) return min(prev_gasprice + delta, MAX_GASPRICE) else: - delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - shard_block_length) - // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT) + delta = (prev_gasprice * (TARGET_SAMPLES_PER_BLOCK - shard_block_length) + // TARGET_SAMPLES_PER_BLOCK // GASPRICE_ADJUSTMENT_COEFFICIENT) return max(prev_gasprice, MIN_GASPRICE + delta) - delta ``` @@ -535,14 +220,6 @@ def get_active_shard_count(state: BeaconState) -> uint64: return INITIAL_ACTIVE_SHARDS ``` -#### `get_online_validator_indices` - -```python -def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]: - active_validators = get_active_validator_indices(state, get_current_epoch(state)) - return set(i for i in active_validators if state.online_countdown[i] != 0) # non-duplicate -``` - #### `get_shard_committee` ```python @@ -561,24 +238,6 @@ def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) - ) ``` -#### `get_light_client_committee` - -```python -def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: - """ - Return the light client committee of no more than ``LIGHT_CLIENT_COMMITTEE_SIZE`` validators. 
- """ - source_epoch = compute_committee_source_epoch(epoch, LIGHT_CLIENT_COMMITTEE_PERIOD) - active_validator_indices = get_active_validator_indices(beacon_state, source_epoch) - seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT) - return compute_committee( - indices=active_validator_indices, - seed=seed, - index=uint64(0), - count=get_active_shard_count(beacon_state), - )[:LIGHT_CLIENT_COMMITTEE_SIZE] -``` - #### `get_shard_proposer_index` ```python @@ -633,88 +292,9 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard: ) ``` -#### `get_latest_slot_for_shard` - -```python -def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot: - """ - Return the latest slot number of the given ``shard``. - """ - return state.shard_states[shard].slot -``` - -#### `get_offset_slots` - -```python -def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]: - """ - Return the offset slots of the given ``shard``. - The offset slot are after the latest slot and before current slot. - """ - return compute_offset_slots(get_latest_slot_for_shard(state, shard), state.slot) -``` ### Predicates -#### `is_on_time_attestation` - -```python -def is_on_time_attestation(state: BeaconState, - attestation_data: AttestationData) -> bool: - """ - Check if the given ``attestation_data`` is on-time. - """ - return attestation_data.slot == compute_previous_slot(state.slot) -``` - -#### `is_winning_attestation` - -```python -def is_winning_attestation(state: BeaconState, - attestation: PendingAttestation, - committee_index: CommitteeIndex, - winning_root: Root) -> bool: - """ - Check if on-time ``attestation`` helped contribute to the successful crosslink of - ``winning_root`` formed by ``committee_index`` committee. 
- """ - return ( - is_on_time_attestation(state, attestation.data) - and attestation.data.index == committee_index - and attestation.data.shard_transition_root == winning_root - ) -``` - -#### `optional_aggregate_verify` - -```python -def optional_aggregate_verify(pubkeys: Sequence[BLSPubkey], - messages: Sequence[Bytes32], - signature: BLSSignature) -> bool: - """ - If ``pubkeys`` is an empty list, the given ``signature`` should be a stub ``NO_SIGNATURE``. - Otherwise, verify it with standard BLS AggregateVerify API. - """ - if len(pubkeys) == 0: - return signature == NO_SIGNATURE - else: - return bls.AggregateVerify(pubkeys, messages, signature) -``` - -#### `optional_fast_aggregate_verify` - -```python -def optional_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: - """ - If ``pubkeys`` is an empty list, the given ``signature`` should be a stub ``NO_SIGNATURE``. - Otherwise, verify it with standard BLS FastAggregateVerify API. - """ - if len(pubkeys) == 0: - return signature == NO_SIGNATURE - else: - return bls.FastAggregateVerify(pubkeys, message, signature) -``` - ### Block processing ```python @@ -752,250 +332,108 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs) ``` -##### New Attestation processing +### New Attestation processing -###### `validate_attestation` - -```python -def validate_attestation(state: BeaconState, attestation: Attestation) -> None: - data = attestation.data - assert data.index < get_committee_count_per_slot(state, data.target.epoch) - assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) - assert data.target.epoch == compute_epoch_at_slot(data.slot) - assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH - - committee = get_beacon_committee(state, data.slot, data.index) - assert 
len(attestation.aggregation_bits) == len(committee)
-
-    if data.target.epoch == get_current_epoch(state):
-        assert data.source == state.current_justified_checkpoint
-    else:
-        assert data.source == state.previous_justified_checkpoint
-
-    # Type 1: on-time attestations
-    if is_on_time_attestation(state, data):
-        # Correct parent block root
-        assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))
-        # Correct shard number
-        shard = compute_shard_from_committee_index(state, data.index, data.slot)
-        assert data.shard == shard
-        # NOTE: We currently set `PHASE_1_FORK_SLOT` to `GENESIS_SLOT` for test vectors.
-        if data.slot > GENESIS_SLOT:
-            # On-time attestations should have a non-empty shard transition root
-            assert data.shard_transition_root != hash_tree_root(ShardTransition())
-        else:
-            assert data.shard_transition_root == hash_tree_root(ShardTransition())
-    # Type 2: no shard transition
-    else:
-        # Ensure delayed attestation
-        assert data.slot < compute_previous_slot(state.slot)
-        # Late attestations cannot have a shard transition root
-        assert data.shard_transition_root == Root()
-
-    # Signature check
-    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
-```
-
-###### Updated `process_attestation`
+#### Updated `process_attestation`
 
 ```python
 def process_attestation(state: BeaconState, attestation: Attestation) -> None:
-    validate_attestation(state, attestation)
-    # Store pending attestation for epoch processing
-    pending_attestation = PendingAttestation(
-        aggregation_bits=attestation.aggregation_bits,
-        data=attestation.data,
-        inclusion_delay=state.slot - attestation.data.slot,
-        proposer_index=get_beacon_proposer_index(state),
-        crosslink_success=boolean(False),  # To be filled in during process_shard_transitions
+    phase0.process_attestation(state, attestation)
+    update_pending_votes(
+        state=state,
+        # The shard header root and the voters' aggregation bits are read
+        # from the attestation itself inside `update_pending_votes`.
+        attestation=attestation,
     )
- if attestation.data.target.epoch == get_current_epoch(state): - state.current_epoch_attestations.append(pending_attestation) +``` + +#### `update_pending_votes` + +```python +def update_pending_votes(state: BeaconState, + attestation: Attestation) -> None: + if slot_to_epoch(slot) == get_current_epoch(state): + pending_headers = state.current_epoch_pending_headers else: - state.previous_epoch_attestations.append(pending_attestation) -``` - -##### Shard transition processing - -###### `apply_shard_transition` - -```python -def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None: - # TODO: only need to check it once when phase 1 starts - assert state.slot > PHASE_1_FORK_SLOT - - # Correct data root count - offset_slots = get_offset_slots(state, shard) - assert ( - len(transition.shard_data_roots) - == len(transition.shard_states) - == len(transition.shard_block_lengths) - == len(offset_slots) + pending_headers = state.previous_epoch_pending_headers + # Create or update the PendingShardHeader object + pending_header_index = None + for index, header in enumerate(pending_headers): + if header.root == attestation.data.shard_header_root: + pending_header_index = index + break + assert pending_header_index is not None + pending_header = pending_headers[pending_header_index] + assert pending_header.slot == attestation.data.slot + 1 + assert pending_header.shard == compute_shard_from_committee_index( + state, + attestation.data.index, + attestation.data.slot + ) + pending_header.votes = bitwise_or( + pending_header.votes, + attestation.aggregation_bits ) - assert transition.start_slot == offset_slots[0] - headers = [] - proposers = [] - prev_gasprice = state.shard_states[shard].gasprice - shard_parent_root = state.shard_states[shard].latest_block_root - for i, offset_slot in enumerate(offset_slots): - shard_block_length = transition.shard_block_lengths[i] - shard_state = transition.shard_states[i] - # Verify correct calculation of 
gas prices and slots - assert shard_state.gasprice == compute_updated_gasprice(prev_gasprice, shard_block_length) - assert shard_state.slot == offset_slot - # Collect the non-empty proposals result - is_empty_proposal = shard_block_length == 0 - if not is_empty_proposal: - proposal_index = get_shard_proposer_index(state, offset_slot, shard) - # Reconstruct shard headers - header = ShardBlockHeader( - shard_parent_root=shard_parent_root, - beacon_parent_root=get_block_root_at_slot(state, offset_slot), - slot=offset_slot, - shard=shard, - proposer_index=proposal_index, - body_root=transition.shard_data_roots[i] - ) - shard_parent_root = hash_tree_root(header) - headers.append(header) - proposers.append(proposal_index) - else: - # Must have a stub for `shard_data_root` if empty slot - assert transition.shard_data_roots[i] == Root() - - prev_gasprice = shard_state.gasprice - - pubkeys = [state.validators[proposer].pubkey for proposer in proposers] - signing_roots = [ - compute_signing_root(header, get_domain(state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(header.slot))) - for header in headers + # Check if the PendingShardHeader is eligible for expedited confirmation + # Requirement 1: nothing else confirmed + all_candidates = [ + c for c in pending_headers if + (c.slot, c.shard) == (pending_header.slot, pending_header.shard) ] - # Verify combined proposer signature - assert optional_aggregate_verify(pubkeys, signing_roots, transition.proposer_signature_aggregate) - - # Copy and save updated shard state - shard_state = copy(transition.shard_states[len(transition.shard_states) - 1]) - shard_state.slot = compute_previous_slot(state.slot) - state.shard_states[shard] = shard_state + if True not in [c.confirmed for c in all_candidates]: + # Requirement 2: >= 2/3 of balance attesting + participants = get_attesting_indices(state, data, pending_commitment.votes) + participants_balance = get_total_balance(state, participants) + full_committee = get_beacon_committee(state, 
data.slot, data.shard) + full_committee_balance = get_total_balance(state, full_committee) + if participants_balance * 2 > full_committee_balance: + pending_header.confirmed = True ``` -###### `process_crosslink_for_shard` +#### `process_shard_data_commitment` ```python -def process_crosslink_for_shard(state: BeaconState, - committee_index: CommitteeIndex, - shard_transition: ShardTransition, - attestations: Sequence[Attestation]) -> Root: - on_time_attestation_slot = compute_previous_slot(state.slot) - committee = get_beacon_committee(state, on_time_attestation_slot, committee_index) - online_indices = get_online_validator_indices(state) - shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot) - - # Loop over all shard transition roots - shard_transition_roots = set([a.data.shard_transition_root for a in attestations]) - for shard_transition_root in sorted(shard_transition_roots): - transition_attestations = [a for a in attestations if a.data.shard_transition_root == shard_transition_root] - transition_participants: Set[ValidatorIndex] = set() - for attestation in transition_attestations: - participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits) - transition_participants = transition_participants.union(participants) - - enough_online_stake = ( - get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >= - get_total_balance(state, online_indices.intersection(committee)) * 2 - ) - # If not enough stake, try next transition root - if not enough_online_stake: - continue - - # Attestation <-> shard transition consistency - assert shard_transition_root == hash_tree_root(shard_transition) - - # Check `shard_head_root` of the winning root - last_offset_index = len(shard_transition.shard_states) - 1 - shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root - for attestation in transition_attestations: - assert attestation.data.shard_head_root == 
shard_head_root - - # Apply transition - apply_shard_transition(state, shard, shard_transition) - # Apply proposer reward and cost - beacon_proposer_index = get_beacon_proposer_index(state) - estimated_attester_reward = sum([get_base_reward(state, attester) for attester in transition_participants]) - proposer_reward = Gwei(estimated_attester_reward // PROPOSER_REWARD_QUOTIENT) - increase_balance(state, beacon_proposer_index, proposer_reward) - states_slots_lengths = zip( - shard_transition.shard_states, - get_offset_slots(state, shard), - shard_transition.shard_block_lengths - ) - for shard_state, slot, length in states_slots_lengths: - proposer_index = get_shard_proposer_index(state, slot, shard) - decrease_balance(state, proposer_index, shard_state.gasprice * length) - - # Return winning transition root - return shard_transition_root - - # No winning transition root, ensure empty and return empty root - assert shard_transition == ShardTransition() - return Root() +def process_shard_data_commitment(state: BeaconState, + signed_header: Signed[ShardDataHeader]) -> None: + header = signed_header.message + header_root = hash_tree_root(header) + # Verify signature + signer_index = get_shard_proposer_index(state, header.slot, header.shard) + assert bls.Verify( + state.validators[signer_index].pubkey, + compute_signing_root(header, get_domain(state, DOMAIN_SHARD_HEADER)), + signed_header.signature + ) + # Verify length of the header + assert ( + bls.Pairing(header.length_proof, SIZE_CHECK_POINTS[header.length]) == + bls.Pairing(header.commitment, G2_ONE) + ) + # Get the correct pending header list + if slot_to_epoch(header.slot) == get_current_epoch(state): + pending_headers = state.current_epoch_pending_headers + else: + pending_headers = state.previous_epoch_pending_headers + + # Check that this header is not yet in the pending list + for pending_header in pending_headers: + assert header_root != pending_header.root + # Include it in the pending list + committee_length 
= len(get_beacon_committee(state, header.slot, header.shard)) + pending_headers.append(PendingShardHeader( + slot=header.slot, + shard=header.shard, + commitment=header.commitment, + root=header_root, + length=header.length, + votes=Bitlist[MAX_COMMITTEE_SIZE]([0] * committee_length), + confirmed=False + )) ``` -###### `process_crosslinks` +### Shard transition processing -```python -def process_crosslinks(state: BeaconState, - shard_transitions: Sequence[ShardTransition], - attestations: Sequence[Attestation]) -> None: - on_time_attestation_slot = compute_previous_slot(state.slot) - committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(on_time_attestation_slot)) - for committee_index in map(CommitteeIndex, range(committee_count)): - # All attestations in the block for this committee/shard and current slot - shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot) - # Since the attestations are validated, all `shard_attestations` satisfy `attestation.data.shard == shard` - shard_attestations = [ - attestation for attestation in attestations - if is_on_time_attestation(state, attestation.data) and attestation.data.index == committee_index - ] - winning_root = process_crosslink_for_shard( - state, committee_index, shard_transitions[shard], shard_attestations - ) - if winning_root != Root(): - # Mark relevant pending attestations as creating a successful crosslink - for pending_attestation in state.current_epoch_attestations: - if is_winning_attestation(state, pending_attestation, committee_index, winning_root): - pending_attestation.crosslink_success = True -``` - -###### `verify_empty_shard_transition` - -```python -def verify_empty_shard_transition(state: BeaconState, shard_transitions: Sequence[ShardTransition]) -> bool: - """ - Verify that a `shard_transition` in a block is empty if an attestation was not processed for it. 
- """ - for shard in range(get_active_shard_count(state)): - if state.shard_states[shard].slot != compute_previous_slot(state.slot): - if shard_transitions[shard] != ShardTransition(): - return False - return True -``` - -###### `process_shard_transitions` - -```python -def process_shard_transitions(state: BeaconState, - shard_transitions: Sequence[ShardTransition], - attestations: Sequence[Attestation]) -> None: - # NOTE: We currently set `PHASE_1_FORK_SLOT` to `GENESIS_SLOT` for test vectors. - if compute_previous_slot(state.slot) > GENESIS_SLOT: - # Process crosslinks - process_crosslinks(state, shard_transitions, attestations) - - # Verify the empty proposal shard states - assert verify_empty_shard_transition(state, shard_transitions) -``` ##### New default validator for deposits @@ -1021,30 +459,6 @@ def get_validator_from_deposit(state: BeaconState, deposit: Deposit) -> Validato ) ``` -#### Light client processing - -```python -def process_light_client_aggregate(state: BeaconState, block_body: BeaconBlockBody) -> None: - committee = get_light_client_committee(state, get_current_epoch(state)) - previous_slot = compute_previous_slot(state.slot) - previous_block_root = get_block_root_at_slot(state, previous_slot) - - total_reward = Gwei(0) - signer_pubkeys = [] - for bit_index, participant_index in enumerate(committee): - if block_body.light_client_bits[bit_index]: - signer_pubkeys.append(state.validators[participant_index].pubkey) - if not state.validators[participant_index].slashed: - increase_balance(state, participant_index, get_base_reward(state, participant_index)) - total_reward += get_base_reward(state, participant_index) - - increase_balance(state, get_beacon_proposer_index(state), Gwei(total_reward // PROPOSER_REWARD_QUOTIENT)) - - signing_root = compute_signing_root(previous_block_root, - get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(previous_slot))) - assert optional_fast_aggregate_verify(signer_pubkeys, signing_root, 
block_body.light_client_signature) -``` - ### Epoch transition This epoch transition overrides the phase0 epoch transition: @@ -1054,59 +468,134 @@ def process_epoch(state: BeaconState) -> None: process_justification_and_finalization(state) process_rewards_and_penalties(state) process_registry_updates(state) - process_reveal_deadlines(state) # Phase 1 - process_challenge_deadlines(state) # Phase 1 + + # Proof of custody + process_reveal_deadlines(state) + process_challenge_deadlines(state) + process_slashings(state) + + # Sharding + process_pending_headers(state) + charge_confirmed_header_fees(state) + reset_pending_headers(state) + + # Final updates + # Phase 0 process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) process_randao_mixes_reset(state) process_historical_roots_update(state) process_participation_record_updates(state) - process_phase_1_final_updates(state) # Phase 1 -``` - -#### Phase 1 final updates - -```python -def process_phase_1_final_updates(state: BeaconState) -> None: + # Proof of custody process_custody_final_updates(state) - process_online_tracking(state) - process_light_client_committee_updates(state) - # Update current_epoch_start_shard state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1)) ``` +#### Pending headers + +```python + +def process_pending_headers(state: BeaconState): + for slot in range(SLOTS_PER_EPOCH): + for shard in range(SHARD_COUNT): + # Pending headers for this (slot, shard) combo + candidates = [ + c for c in state.previous_epoch_pending_headers if + (c.slot, c.shard) == (slot, shard) + ] + if True not in [c.confirmed for c in candidates]: + # The entire committee (and its balance) + full_committee = get_beacon_committee(state, slot, shard) + full_committee_balance = get_total_balance(state, full_committee) + # The set of voters who voted for each header + # (and their total balances) + voting_sets = [ + [v for i, v in enumerate(full_committee) if 
c.votes[i]] + for c in candidates + ] + voting_balances = [ + get_total_balance(state, voters) + for voters in voting_sets + ] + # Get the index with the most total balance voting for them. + # NOTE: if two choices get exactly the same voting balance, + # the candidate earlier in the list wins + if max(voting_balances) > 0: + winning_index = voting_balances.index(max(voting_balances)) + else: + # If no votes, zero wins + winning_index = [c.root for c in candidates].index(Root()) + candidates[winning_index].confirmed = True + confirmed_headers = Vector[ + Vector[PendingShardHeader, SLOTS_PER_EPOCH], MAX_SHARDS + ]() + for c in state.previous_epoch_pending_headers: + if c.confirmed: + confirmed_headers[c.shard][c.slot % SLOTS_PER_EPOCH] = c + state.confirmed_header_root = hash_tree_root(confirmed_headers) +``` + +```python +def charge_confirmed_header_fees(state: BeaconState) -> None: + new_gasprice = state.shard_gasprice + for slot in range(SLOTS_PER_EPOCH): + for shard in range(SHARD_COUNT): + confirmed_candidates = [ + c for c in state.previous_epoch_pending_headers if + (c.slot, c.shard, c.confirmed) == (slot, shard, True) + ] + if confirmed_candidates: + candidate = confirmed_candidates[0] + # Charge EIP 1559 fee + proposer = get_shard_proposer(state, slot, shard) + fee = ( + (state.shard_gasprice * candidates[i].length) // + TARGET_SAMPLES_PER_BLOCK + ) + decrease_balance(state, proposer, fee) + new_gasprice = compute_updated_gasprice( + new_gasprice, + candidates[i].length + ) + state.shard_gasprice = new_gasprice +``` + +```python +def reset_pending_headers(state: BeaconState): + state.previous_epoch_pending_headers = state.current_epoch_pending_headers + shards = [ + compute_shard_from_committee_index(state, index, slot) + for i in range() + state, + attestation.data.index, + attestation.data.slot + ) + state.current_epoch_pending_headers = [] + # Add dummy "empty" PendingAttestations + # (default to vote for if no shard header availabl) + for slot in 
range(SLOTS_IN_EPOCH): + for index in range(get_committee_count_per_slot(get_current_epoch(state))): + shard = compute_shard_from_committee_index(state, index, slot) + committee_length = len(get_beacon_committee( + state, + header.slot, + header.shard + )) + state.current_epoch_pending_headers.append(PendingShardHeader( + slot=slot, + shard=shard, + commitment=BLSCommitment(), + root=Root(), + length=0, + votes=Bitlist[MAX_COMMITTEE_SIZE]([0] * committee_length), + confirmed=False + )) + +``` + #### Custody game updates -`process_reveal_deadlines`, `process_challenge_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./custody-game.md), - -#### Online-tracking - -```python -def process_online_tracking(state: BeaconState) -> None: - # Slowly remove validators from the "online" set if they do not show up - for index in range(len(state.validators)): - if state.online_countdown[index] != 0: - state.online_countdown[index] = state.online_countdown[index] - 1 - - # Process pending attestations - for pending_attestation in state.current_epoch_attestations + state.previous_epoch_attestations: - for index in get_attesting_indices(state, pending_attestation.data, pending_attestation.aggregation_bits): - state.online_countdown[index] = ONLINE_PERIOD -``` - -#### Light client committee updates - -```python -def process_light_client_committee_updates(state: BeaconState) -> None: - """ - Update light client committees. - """ - next_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) - if next_epoch % LIGHT_CLIENT_COMMITTEE_PERIOD == 0: - state.current_light_committee = state.next_light_committee - new_committee = get_light_client_committee(state, next_epoch + LIGHT_CLIENT_COMMITTEE_PERIOD) - state.next_light_committee = committee_to_compact_committee(state, new_committee) -``` +`process_reveal_deadlines`, `process_challenge_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./custody-game.md). 
From d02a6422e3df2a329319691caa8f307bec4cfe8f Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 10 Dec 2020 10:21:21 +0800 Subject: [PATCH 031/127] Combing... part 1 --- specs/phase1/beacon-chain.md | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index e9e252824..1434f9b0d 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -18,7 +18,7 @@ ## Introduction -This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding. +This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/r1XzqYIOv) and more broadly [here](https://arxiv.org/abs/1809.09044), using Kate commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. ## Custom types @@ -30,16 +30,13 @@ We define the following Python custom types for type hinting and readability: ## Configuration -Configuration is not namespaced. Instead it is strictly an extension; - no constants of phase 0 change, but new constants are adopted for changing behaviors. - ### Misc -| Name | Value | -| - | - | -| `MAX_SHARDS` | `uint64(2**10)` (= 1024) | -| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | -| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 2) | +| Name | Value | Notes | +| - | - | - | +| `MAX_SHARDS` | `uint64(2**10)` (= 1024) | Theoretical max shard count (used to determine data structure sizes) | +| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count | +| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 2) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | ### Shard block configs @@ -55,13 +52,13 @@ Configuration is not namespaced. 
Instead it is strictly an extension; | - | - | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | -These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X - w ** revbit(a))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w` is the root of unity and `revbit` is the reverse-bit-order function. +These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X - w ** revbit(a))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w` is the root of unity and `revbit` is the reverse-bit-order function. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. ### Gwei values | Name | Value | Unit | Description | | - | - | - | - | -| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | Max gasprice charged for an TARGET-sized shard block | +| `MAX_GASPRICE` | `Gwei(2**24)` (= 16,777,216) | Gwei | Max gasprice charged for an TARGET-sized shard block | | `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for an TARGET-sized shard block | ### Time parameters @@ -74,7 +71,7 @@ These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_P | Name | Value | | - | - | -| `DOMAIN_SHARD_HEADER` | 0x40 | +| `DOMAIN_SHARD_HEADER` | `DomainType('0x80000000')` | ## Updated containers @@ -170,14 +167,14 @@ def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex #### `compute_updated_gasprice` ```python -def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64) -> Gwei: +def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64, adjustment_quotient: uint64) -> Gwei: if shard_block_length > TARGET_SAMPLES_PER_BLOCK: delta = (prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOCK) - // TARGET_SAMPLES_PER_BLOCK // GASPRICE_ADJUSTMENT_COEFFICIENT) + // TARGET_SAMPLES_PER_BLOCK // 
adjustment_quotient) return min(prev_gasprice + delta, MAX_GASPRICE) else: delta = (prev_gasprice * (TARGET_SAMPLES_PER_BLOCK - shard_block_length) - // TARGET_SAMPLES_PER_BLOCK // GASPRICE_ADJUSTMENT_COEFFICIENT) + // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient) return max(prev_gasprice, MIN_GASPRICE + delta) - delta ``` @@ -541,6 +538,7 @@ def process_pending_headers(state: BeaconState): ```python def charge_confirmed_header_fees(state: BeaconState) -> None: new_gasprice = state.shard_gasprice + adjustment_quotient = get_active_shard_count(state) * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT for slot in range(SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): confirmed_candidates = [ @@ -558,7 +556,8 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: decrease_balance(state, proposer, fee) new_gasprice = compute_updated_gasprice( new_gasprice, - candidates[i].length + candidates[i].length, + adjustment_quotient ) state.shard_gasprice = new_gasprice ``` From f75ea377d51b19f1a5bb1c9bea3baf361c011791 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 10 Dec 2020 11:47:02 +0800 Subject: [PATCH 032/127] Second pass --- specs/phase1/beacon-chain.md | 108 +++++++++++++---------------------- 1 file changed, 39 insertions(+), 69 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 1434f9b0d..19f3c7a84 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -37,6 +37,7 @@ We define the following Python custom types for type hinting and readability: | `MAX_SHARDS` | `uint64(2**10)` (= 1024) | Theoretical max shard count (used to determine data structure sizes) | | `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count | | `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 2) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | +| `MAX_SHARD_HEADERS` | `MAX_SHARDS * 4` | | ### Shard block configs @@ -50,6 +51,7 @@ We define the following Python 
custom types for type hinting and readability: | Name | Value | | - | - | +| `G2_ONE` | The G2 generator | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X - w ** revbit(a))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w` is the root of unity and `revbit` is the reverse-bit-order function. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. @@ -92,6 +94,13 @@ class AttestationData(Container): shard_header_root: Root ``` +### `BeaconBlock` + +```python +class BeaconBlock(phase0.BeaconBlock): + shard_headers: List[Signed[ShardHeader], MAX_SHARD_HEADERS] +``` + ### `BeaconState` ```python @@ -100,6 +109,7 @@ class BeaconState(phase0.BeaconState): previous_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] confirmed_header_root: Root shard_gasprice: uint64 + current_epoch_start_shard: Shard ``` ## New containers @@ -201,7 +211,7 @@ def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: Return the number of committees in each slot for the given ``epoch``. """ return max(uint64(1), min( - get_active_shard_count(state), + get_active_shard_count(state, epoch), uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, )) ``` @@ -209,7 +219,7 @@ def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: #### `get_active_shard_count` ```python -def get_active_shard_count(state: BeaconState) -> uint64: +def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64: """ Return the number of active shards. Note that this puts an upper bound on the number of committees per slot. 
@@ -231,7 +241,7 @@ def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) - indices=active_validator_indices, seed=seed, index=shard, - count=get_active_shard_count(beacon_state), + count=get_active_shard_count(beacon_state, epoch), ) ``` @@ -245,21 +255,7 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard epoch = compute_epoch_at_slot(slot) committee = get_shard_committee(beacon_state, epoch, shard) seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_COMMITTEE) + uint_to_bytes(slot)) - r = bytes_to_uint64(seed[:8]) - return committee[r % len(committee)] -``` - -#### `get_committee_count_delta` - -```python -def get_committee_count_delta(state: BeaconState, start_slot: Slot, stop_slot: Slot) -> uint64: - """ - Return the sum of committee counts in range ``[start_slot, stop_slot)``. - """ - return uint64(sum( - get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(slot))) - for slot in range(start_slot, stop_slot) - )) + return compute_proposer_index(state, committee, seed) ``` #### `get_start_shard` @@ -270,26 +266,24 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard: Return the start shard at ``slot``. 
""" current_epoch_start_slot = compute_start_slot_at_epoch(get_current_epoch(state)) - active_shard_count = get_active_shard_count(state) - if current_epoch_start_slot == slot: - return state.current_epoch_start_shard - elif slot > current_epoch_start_slot: + shard = state.current_epoch_start_shard + if slot > current_epoch_start_slot: # Current epoch or the next epoch lookahead - shard_delta = get_committee_count_delta(state, start_slot=current_epoch_start_slot, stop_slot=slot) - return Shard((state.current_epoch_start_shard + shard_delta) % active_shard_count) - else: + for _slot in range(current_epoch_start_slot, slot): + committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot))) + active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot))) + shard = (shard + committee_count) % active_shard_count + return Shard(shard) + elif slot < current_epoch_start_slot: # Previous epoch - shard_delta = get_committee_count_delta(state, start_slot=slot, stop_slot=current_epoch_start_slot) - max_committees_per_slot = active_shard_count - max_committees_in_span = max_committees_per_slot * (current_epoch_start_slot - slot) - return Shard( + for _slot in list(range(slot, current_epoch_start_slot))[::-1]: + committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot))) + active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot))) # Ensure positive - (state.current_epoch_start_shard + max_committees_in_span - shard_delta) - % active_shard_count - ) + shard = (shard + active_shard_count - committee_count) % active_shard_count + return Shard(shard) ``` - ### Predicates ### Block processing @@ -320,6 +314,9 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) + # Limit is dynamic based on active shard count + 
assert len(body.shard_headers) <= 4 * get_active_shard_count(state, get_current_epoch(state)) + for_ops(body.shard_headers, process_shard_header) # See custody game spec. process_custody_game_operations(state, body) @@ -349,18 +346,16 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ```python def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: - if slot_to_epoch(slot) == get_current_epoch(state): + if compute_epoch_at_slot(slot) == get_current_epoch(state): pending_headers = state.current_epoch_pending_headers else: pending_headers = state.previous_epoch_pending_headers # Create or update the PendingShardHeader object - pending_header_index = None - for index, header in enumerate(pending_headers): + pending_header = None + for header in pending_headers: if header.root == attestation.data.shard_header_root: - pending_header_index = index - break - assert pending_header_index is not None - pending_header = pending_headers[pending_header_index] + pending_header = header + assert pending_header is not None assert pending_header.slot == attestation.data.slot + 1 assert pending_header.shard == compute_shard_from_committee_index( state, @@ -388,11 +383,11 @@ def update_pending_votes(state: BeaconState, pending_header.confirmed = True ``` -#### `process_shard_data_commitment` +#### `process_shard_header` ```python -def process_shard_data_commitment(state: BeaconState, - signed_header: Signed[ShardDataHeader]) -> None: +def process_shard_header(state: BeaconState, + signed_header: Signed[ShardDataHeader]) -> None: header = signed_header.message header_root = hash_tree_root(header) # Verify signature @@ -408,7 +403,7 @@ def process_shard_data_commitment(state: BeaconState, bls.Pairing(header.commitment, G2_ONE) ) # Get the correct pending header list - if slot_to_epoch(header.slot) == get_current_epoch(state): + if compute_epoch_at_slot(header.slot) == get_current_epoch(state): pending_headers = 
state.current_epoch_pending_headers else: pending_headers = state.previous_epoch_pending_headers @@ -431,31 +426,6 @@ def process_shard_data_commitment(state: BeaconState, ### Shard transition processing - -##### New default validator for deposits - -```python -def get_validator_from_deposit(state: BeaconState, deposit: Deposit) -> Validator: - amount = deposit.data.amount - effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) - next_custody_secret_to_reveal = get_custody_period_for_validator( - ValidatorIndex(len(state.validators)), - get_current_epoch(state), - ) - - return Validator( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - activation_eligibility_epoch=FAR_FUTURE_EPOCH, - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - effective_balance=effective_balance, - next_custody_secret_to_reveal=next_custody_secret_to_reveal, - all_custody_secrets_revealed_epoch=FAR_FUTURE_EPOCH, - ) -``` - ### Epoch transition This epoch transition overrides the phase0 epoch transition: From 623f164bf1b08f34d38ae7bad2c4856cbef8ac43 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 10 Dec 2020 14:29:09 +0800 Subject: [PATCH 033/127] Fixed custom types --- specs/phase1/beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 19f3c7a84..92ac59162 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -26,7 +26,8 @@ We define the following Python custom types for type hinting and readability: | Name | SSZ equivalent | Description | | - | - | - | -| `Shard` | `uint64` | a shard number | +| `Shard` | `uint64` | A shard number | +| `BLSCommitment` | `bytes48` | A G1 curve point | ## Configuration From 3d108e7fe2cde0055a64af8aa4c9a5bb704563d3 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 10 Dec 2020 14:37:00 +0800 Subject: 
[PATCH 034/127] Made confirmed headers a separate object --- specs/phase1/beacon-chain.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 92ac59162..36ae05fca 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -108,7 +108,7 @@ class BeaconBlock(phase0.BeaconBlock): class BeaconState(phase0.BeaconState): current_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] previous_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] - confirmed_header_root: Root + two_epochs_ago_confirmed_headers: Vector[Vector[PendingShardHeader, SLOTS_PER_EPOCH], MAX_SHARDS] shard_gasprice: uint64 current_epoch_start_shard: Shard ``` @@ -497,13 +497,12 @@ def process_pending_headers(state: BeaconState): # If no votes, zero wins winning_index = [c.root for c in candidates].index(Root()) candidates[winning_index].confirmed = True - confirmed_headers = Vector[ - Vector[PendingShardHeader, SLOTS_PER_EPOCH], MAX_SHARDS - ]() + for slot in range(SLOTS_PER_EPOCH): + for shard in range(SHARD_COUNT): + state.two_epochs_ago_confirmed_headers[shard][slot] = PendingHeader() for c in state.previous_epoch_pending_headers: if c.confirmed: - confirmed_headers[c.shard][c.slot % SLOTS_PER_EPOCH] = c - state.confirmed_header_root = hash_tree_root(confirmed_headers) + state.two_epochs_ago_confirmed_headers[c.shard][c.slot % SLOTS_PER_EPOCH] = c ``` ```python From ed357b9f9fa636519af782fd1979be1bbf845182 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 10 Dec 2020 14:42:43 +0800 Subject: [PATCH 035/127] Abstracted (Kate commitment, length) into separate object --- specs/phase1/beacon-chain.md | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 36ae05fca..e2a46cd01 100644 --- a/specs/phase1/beacon-chain.md +++ 
b/specs/phase1/beacon-chain.md @@ -108,7 +108,7 @@ class BeaconBlock(phase0.BeaconBlock): class BeaconState(phase0.BeaconState): current_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] previous_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] - two_epochs_ago_confirmed_headers: Vector[Vector[PendingShardHeader, SLOTS_PER_EPOCH], MAX_SHARDS] + most_recent_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] shard_gasprice: uint64 current_epoch_start_shard: Shard ``` @@ -117,6 +117,16 @@ class BeaconState(phase0.BeaconState): The following containers are new in Phase 1. +### `DataCommitment` + +```python +class DataCommitment(Container): + # Kate commitment to the data + point: BLSCommitment + # Length of the data in samples + length: length +``` + ### `ShardHeader` ```python @@ -124,10 +134,8 @@ class ShardHeader(Container): # Slot and shard that this header is intended for slot: Slot shard: Shard - # Kate commitment to the data - commitment: BLSCommitment - # Length of the data in samples - length: uint64 + # The actual data commitment + commitment: DataCommitment # Proof of the length (more precisely, proof that values at # positions >= the length all equal zero) length_proof: BLSCommitment @@ -400,8 +408,8 @@ def process_shard_header(state: BeaconState, ) # Verify length of the header assert ( - bls.Pairing(header.length_proof, SIZE_CHECK_POINTS[header.length]) == - bls.Pairing(header.commitment, G2_ONE) + bls.Pairing(header.length_proof, SIZE_CHECK_POINTS[header.commitment.length]) == + bls.Pairing(header.commitment.point, G2_ONE) ) # Get the correct pending header list if compute_epoch_at_slot(header.slot) == get_current_epoch(state): @@ -419,7 +427,6 @@ def process_shard_header(state: BeaconState, shard=header.shard, commitment=header.commitment, root=header_root, - length=header.length, votes=Bitlist[MAX_COMMITTEE_SIZE]([0] * committee_length), confirmed=False )) 
@@ -499,10 +506,10 @@ def process_pending_headers(state: BeaconState): candidates[winning_index].confirmed = True for slot in range(SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): - state.two_epochs_ago_confirmed_headers[shard][slot] = PendingHeader() + state.most_recent_confirmed_commitments[shard][slot] = DataCommitment() for c in state.previous_epoch_pending_headers: if c.confirmed: - state.two_epochs_ago_confirmed_headers[c.shard][c.slot % SLOTS_PER_EPOCH] = c + state.most_recent_confirmed_commitments[c.shard][c.slot % SLOTS_PER_EPOCH] = c.commitment ``` ```python @@ -520,13 +527,13 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: # Charge EIP 1559 fee proposer = get_shard_proposer(state, slot, shard) fee = ( - (state.shard_gasprice * candidates[i].length) // + (state.shard_gasprice * candidates[i].commitment.length) // TARGET_SAMPLES_PER_BLOCK ) decrease_balance(state, proposer, fee) new_gasprice = compute_updated_gasprice( new_gasprice, - candidates[i].length, + candidates[i].commitment.length, adjustment_quotient ) state.shard_gasprice = new_gasprice @@ -556,9 +563,8 @@ def reset_pending_headers(state: BeaconState): state.current_epoch_pending_headers.append(PendingShardHeader( slot=slot, shard=shard, - commitment=BLSCommitment(), + commitment=DataCommitment(), root=Root(), - length=0, votes=Bitlist[MAX_COMMITTEE_SIZE]([0] * committee_length), confirmed=False )) From 2190c13858502e36e7d917124ce83e9202829545 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 10 Dec 2020 14:48:57 +0800 Subject: [PATCH 036/127] Added dependencies to fork choice section --- specs/phase1/fork-choice.md | 93 +++++++------------------------------ 1 file changed, 17 insertions(+), 76 deletions(-) diff --git a/specs/phase1/fork-choice.md b/specs/phase1/fork-choice.md index d2e1bfefe..3accd2d10 100644 --- a/specs/phase1/fork-choice.md +++ b/specs/phase1/fork-choice.md @@ -22,87 +22,28 @@ ## Introduction -This document is the beacon chain fork choice spec for part of 
Ethereum 2.0 Phase 1. +This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1. The only change that we add from phase 0 is that we add a concept of "data dependencies"; a block is only eligible for consideration in the fork choice after a data availability test has been successfully completed for all dependencies. The "root" of a shard block for data dependency purposes is considered to be a DataCommitment object, which is a pair of a Kate commitment and a length. -### Updated data structures - -#### Extended `Store` +## Dependency calculation ```python -@dataclass -class Store(object): - time: uint64 - genesis_time: uint64 - justified_checkpoint: Checkpoint - finalized_checkpoint: Checkpoint - best_justified_checkpoint: Checkpoint - blocks: Dict[Root, BeaconBlock] = field(default_factory=dict) - block_states: Dict[Root, BeaconState] = field(default_factory=dict) - checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict) - latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict) - shard_stores: Dict[Shard, ShardStore] = field(default_factory=dict) -``` - -### New data structures - -#### `ShardLatestMessage` - -```python -@dataclass(eq=True, frozen=True) -class ShardLatestMessage(object): - epoch: Epoch - root: Root -``` - -#### `ShardStore` - -```python -@dataclass -class ShardStore: - shard: Shard - signed_blocks: Dict[Root, SignedShardBlock] = field(default_factory=dict) - block_states: Dict[Root, ShardState] = field(default_factory=dict) - latest_messages: Dict[ValidatorIndex, ShardLatestMessage] = field(default_factory=dict) -``` - -### Updated helpers - -#### Updated `get_forkchoice_store` - -```python -def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store: - assert anchor_block.state_root == hash_tree_root(anchor_state) - anchor_root = hash_tree_root(anchor_block) - anchor_epoch = get_current_epoch(anchor_state) - justified_checkpoint = 
Checkpoint(epoch=anchor_epoch, root=anchor_root) - finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) - return Store( - time=anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot, - genesis_time=anchor_state.genesis_time, - justified_checkpoint=justified_checkpoint, - finalized_checkpoint=finalized_checkpoint, - best_justified_checkpoint=justified_checkpoint, - blocks={anchor_root: copy(anchor_block)}, - block_states={anchor_root: anchor_state.copy()}, - checkpoint_states={justified_checkpoint: anchor_state.copy()}, - shard_stores={ - Shard(shard): get_forkchoice_shard_store(anchor_state, Shard(shard)) - for shard in range(get_active_shard_count(anchor_state)) - } +def get_new_dependencies(state: BeaconState) -> Set[DataCommitment]: + return set( + # Already confirmed during this epoch + [c.commitment for c in state.current_epoch_pending_headers if c.confirmed] + + # Already confirmed during previous epoch + [c.commitment for c in state.previous_epoch_pending_headers if c.confirmed] + + # Confirmed in the epoch before the previous + [c for shard in state.most_recent_confirmed_commitments for c in shard if c != DataCommitment()] ) ``` -#### Updated `update_latest_messages` - ```python -def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None: - target = attestation.data.target - beacon_block_root = attestation.data.beacon_block_root - # TODO: separate shard chain vote - shard = attestation.data.shard - for i in attesting_indices: - if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch: - store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root) - shard_latest_message = ShardLatestMessage(epoch=target.epoch, root=attestation.data.shard_head_root) - store.shard_stores[shard].latest_messages[i] = shard_latest_message +def get_all_dependencies(store: Store, block: BeaconBlock) -> Set[DataCommitment]: + if block.slot < 
SHARDING_FORK_SLOT: + return set() + else: + latest = get_new_dependencies(store.block_states[hash_tree_root(block)]) + older = get_all_dependencies(store, store.blocks[block.parent_root]) + return latest.union(older) ``` From 1ce25c19e8a62cd5c5c09594cc97e8bd43a272a4 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 11 Dec 2020 16:10:50 +0800 Subject: [PATCH 037/127] Minor fixes --- specs/phase1/beacon-chain.md | 37 +++++++++++++++++++++++++++++++++++- specs/phase1/fork-choice.md | 9 +-------- 2 files changed, 37 insertions(+), 9 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index e2a46cd01..3f4ee9e46 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -10,9 +10,44 @@ - [Introduction](#introduction) - [Custom types](#custom-types) - [Configuration](#configuration) + - [Misc](#misc) + - [Shard block configs](#shard-block-configs) + - [Precomputed size verification points](#precomputed-size-verification-points) + - [Gwei values](#gwei-values) + - [Time parameters](#time-parameters) + - [Domain types](#domain-types) - [Updated containers](#updated-containers) + - [`AttestationData`](#attestationdata) + - [`BeaconBlock`](#beaconblock) + - [`BeaconState`](#beaconstate) - [New containers](#new-containers) + - [`DataCommitment`](#datacommitment) + - [`ShardHeader`](#shardheader) + - [`PendingShardHeader`](#pendingshardheader) - [Helper functions](#helper-functions) + - [Misc](#misc-1) + - [`compute_previous_slot`](#compute_previous_slot) + - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index) + - [`compute_updated_gasprice`](#compute_updated_gasprice) + - [`compute_committee_source_epoch`](#compute_committee_source_epoch) + - [Beacon state accessors](#beacon-state-accessors) + - [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot) + - [`get_active_shard_count`](#get_active_shard_count) + - [`get_shard_committee`](#get_shard_committee) + - 
[`get_shard_proposer_index`](#get_shard_proposer_index) + - [`get_start_shard`](#get_start_shard) + - [Predicates](#predicates) + - [Block processing](#block-processing) + - [Operations](#operations) + - [New Attestation processing](#new-attestation-processing) + - [Updated `process_attestation`](#updated-process_attestation) + - [`update_pending_votes`](#update_pending_votes) + - [`process_shard_header`](#process_shard_header) + - [Shard transition processing](#shard-transition-processing) + - [Epoch transition](#epoch-transition) + - [Pending headers](#pending-headers) + - [Phase 1 final updates](#phase-1-final-updates) + - [Custody game updates](#custody-game-updates) @@ -551,7 +586,7 @@ def reset_pending_headers(state: BeaconState): ) state.current_epoch_pending_headers = [] # Add dummy "empty" PendingAttestations - # (default to vote for if no shard header availabl) + # (default to vote for if no shard header available) for slot in range(SLOTS_IN_EPOCH): for index in range(get_committee_count_per_slot(get_current_epoch(state))): shard = compute_shard_from_committee_index(state, index, slot) diff --git a/specs/phase1/fork-choice.md b/specs/phase1/fork-choice.md index 3accd2d10..858a0c872 100644 --- a/specs/phase1/fork-choice.md +++ b/specs/phase1/fork-choice.md @@ -8,14 +8,7 @@ - [Introduction](#introduction) - - [Updated data structures](#updated-data-structures) - - [Extended `Store`](#extended-store) - - [New data structures](#new-data-structures) - - [`ShardLatestMessage`](#shardlatestmessage) - - [`ShardStore`](#shardstore) - - [Updated helpers](#updated-helpers) - - [Updated `get_forkchoice_store`](#updated-get_forkchoice_store) - - [Updated `update_latest_messages`](#updated-update_latest_messages) +- [Dependency calculation](#dependency-calculation) From 55d9f62bf20b35dfaafa24dd3b22dc0df2efe710 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 11 Dec 2020 16:17:16 +0800 Subject: [PATCH 038/127] (OOP SSZ) if the fields are updated, we need to 
redefine them when defining the new SSZ class with the new field classes --- specs/phase1/beacon-chain.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 3f4ee9e46..5e72c47a7 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -141,6 +141,11 @@ class BeaconBlock(phase0.BeaconBlock): ```python class BeaconState(phase0.BeaconState): + # Updated fields + latest_block_header: BeaconBlockHeader + previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] + current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] + # New fields current_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] previous_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] most_recent_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] From 3211c11c17694b961b23d91f5992ee3d16c7908e Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 11 Dec 2020 16:39:47 +0800 Subject: [PATCH 039/127] Revert: `BeaconBlockHeader` was not changed --- specs/phase1/beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 5e72c47a7..0350d4732 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -142,7 +142,6 @@ class BeaconBlock(phase0.BeaconBlock): ```python class BeaconState(phase0.BeaconState): # Updated fields - latest_block_header: BeaconBlockHeader previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # New fields From 27dbb5e4a42d1efa1a5cdde2a6f6891e1e9bc126 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sat, 12 Dec 2020 11:29:50 +0800 Subject: [PATCH 040/127] Update specs/phase1/beacon-chain.md Co-authored-by: dankrad --- 
specs/phase1/beacon-chain.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 0350d4732..1adc83e84 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -332,7 +332,6 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard: return Shard(shard) ``` -### Predicates ### Block processing From badc3eaa8e8a95bde2b44155c9a9ebe3f21a6ff8 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 13 Dec 2020 08:34:45 +0800 Subject: [PATCH 041/127] Update specs/phase1/beacon-chain.md Co-authored-by: dankrad --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 1adc83e84..8c3854e5f 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -362,7 +362,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) # Limit is dynamic based on active shard count - assert len(body.shard_headers) <= 4 * get_active_shard_count(state, get_current_epoch(state)) + assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state)) for_ops(body.shard_headers, process_shard_header) # See custody game spec. 
From 1bccf9ff027968ef7c88abe24af60425c555fc83 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 13 Dec 2020 08:34:59 +0800 Subject: [PATCH 042/127] Update specs/phase1/beacon-chain.md Co-authored-by: dankrad --- specs/phase1/beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 8c3854e5f..ecf529d97 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -73,7 +73,8 @@ We define the following Python custom types for type hinting and readability: | `MAX_SHARDS` | `uint64(2**10)` (= 1024) | Theoretical max shard count (used to determine data structure sizes) | | `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count | | `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 2) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | -| `MAX_SHARD_HEADERS` | `MAX_SHARDS * 4` | | +| `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | +| `MAX_SHARD_HEADERS` | `MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD` | | ### Shard block configs From 42ad1208a9be39000134c0e5d2c56b1b5ed4757d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 13 Dec 2020 08:35:09 +0800 Subject: [PATCH 043/127] Update specs/phase1/beacon-chain.md Co-authored-by: dankrad --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index ecf529d97..5192fdb86 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -91,7 +91,7 @@ We define the following Python custom types for type hinting and readability: | `G2_ONE` | The G2 generator | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | -These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X - w ** revbit(a))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w` is the root of unity and `revbit` is the reverse-bit-order 
function. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. +These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X ** POINTS_PER_SAMPLE - w ** (revbit(a, MAX_SAMPLES_PER_BLOCK) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w` is the root of unity and `revbit` is the reverse-bit-order function. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. ### Gwei values From 2abc7a4cf2e7b8cce4e20f02caf99947522b2f53 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Sun, 13 Dec 2020 08:36:45 +0800 Subject: [PATCH 044/127] pending_headers -> pending_shard_headers --- specs/phase1/beacon-chain.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 5192fdb86..91426d2ba 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -146,8 +146,8 @@ class BeaconState(phase0.BeaconState): previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # New fields - current_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] - previous_epoch_pending_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] + current_epoch_pending_shard_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] + previous_epoch_pending_shard_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] most_recent_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] shard_gasprice: uint64 current_epoch_start_shard: Shard @@ -395,9 +395,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: def 
update_pending_votes(state: BeaconState, attestation: Attestation) -> None: if compute_epoch_at_slot(slot) == get_current_epoch(state): - pending_headers = state.current_epoch_pending_headers + pending_headers = state.current_epoch_pending_shard_headers else: - pending_headers = state.previous_epoch_pending_headers + pending_headers = state.previous_epoch_pending_shard_headers # Create or update the PendingShardHeader object pending_header = None for header in pending_headers: @@ -452,9 +452,9 @@ def process_shard_header(state: BeaconState, ) # Get the correct pending header list if compute_epoch_at_slot(header.slot) == get_current_epoch(state): - pending_headers = state.current_epoch_pending_headers + pending_headers = state.current_epoch_pending_shard_headers else: - pending_headers = state.previous_epoch_pending_headers + pending_headers = state.previous_epoch_pending_shard_headers # Check that this header is not yet in the pending list for pending_header in pending_headers: @@ -517,7 +517,7 @@ def process_pending_headers(state: BeaconState): for shard in range(SHARD_COUNT): # Pending headers for this (slot, shard) combo candidates = [ - c for c in state.previous_epoch_pending_headers if + c for c in state.previous_epoch_pending_shard_headers if (c.slot, c.shard) == (slot, shard) ] if True not in [c.confirmed for c in candidates]: @@ -546,7 +546,7 @@ def process_pending_headers(state: BeaconState): for slot in range(SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): state.most_recent_confirmed_commitments[shard][slot] = DataCommitment() - for c in state.previous_epoch_pending_headers: + for c in state.previous_epoch_pending_shard_headers: if c.confirmed: state.most_recent_confirmed_commitments[c.shard][c.slot % SLOTS_PER_EPOCH] = c.commitment ``` @@ -558,7 +558,7 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: for slot in range(SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): confirmed_candidates = [ - c for c in 
state.previous_epoch_pending_headers if + c for c in state.previous_epoch_pending_shard_headers if (c.slot, c.shard, c.confirmed) == (slot, shard, True) ] if confirmed_candidates: @@ -580,7 +580,7 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: ```python def reset_pending_headers(state: BeaconState): - state.previous_epoch_pending_headers = state.current_epoch_pending_headers + state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers shards = [ compute_shard_from_committee_index(state, index, slot) for i in range() @@ -588,7 +588,7 @@ def reset_pending_headers(state: BeaconState): attestation.data.index, attestation.data.slot ) - state.current_epoch_pending_headers = [] + state.current_epoch_pending_shard_headers = [] # Add dummy "empty" PendingAttestations # (default to vote for if no shard header available) for slot in range(SLOTS_IN_EPOCH): @@ -599,7 +599,7 @@ def reset_pending_headers(state: BeaconState): header.slot, header.shard )) - state.current_epoch_pending_headers.append(PendingShardHeader( + state.current_epoch_pending_shard_headers.append(PendingShardHeader( slot=slot, shard=shard, commitment=DataCommitment(), From 4647a1b6e2e20a48d5518f338ad7b7866528810f Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 14 Dec 2020 22:57:53 +0000 Subject: [PATCH 045/127] Some small fixes --- specs/phase1/beacon-chain.md | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 91426d2ba..91ce411bf 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -72,9 +72,10 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `MAX_SHARDS` | `uint64(2**10)` (= 1024) | Theoretical max shard count (used to determine data structure sizes) | | `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count | -| `GASPRICE_ADJUSTMENT_COEFFICIENT` | 
`uint64(2**3)` (= 2) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | +| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | | `MAX_SHARD_HEADERS` | `MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD` | | +| `PRIMITIVE_ROOT_OF_UNITY` | `5` | Primitive root of unity of the BLS12_381 (inner) modulus | ### Shard block configs @@ -89,16 +90,17 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | | `G2_ONE` | The G2 generator | +| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | -These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X ** POINTS_PER_SAMPLE - w ** (revbit(a, MAX_SAMPLES_PER_BLOCK) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w` is the root of unity and `revbit` is the reverse-bit-order function. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. +These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. 
### Gwei values | Name | Value | Unit | Description | | - | - | - | - | -| `MAX_GASPRICE` | `Gwei(2**24)` (= 16,777,216) | Gwei | Max gasprice charged for an TARGET-sized shard block | -| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for an TARGET-sized shard block | +| `MAX_GASPRICE` | `Gwei(2**24)` (= 16,777,216) | Gwei | Max gasprice charged for a TARGET-sized shard block | +| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for a TARGET-sized shard block | ### Time parameters @@ -146,8 +148,8 @@ class BeaconState(phase0.BeaconState): previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # New fields - current_epoch_pending_shard_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] - previous_epoch_pending_shard_headers: List[PendingHeader, MAX_PENDING_HEADERS * SLOTS_PER_EPOCH] + current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] + previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] most_recent_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] shard_gasprice: uint64 current_epoch_start_shard: Shard @@ -205,6 +207,17 @@ class PendingShardHeader(Container): ### Misc +#### `reverse_bit_order` + +```python +def reverse_bit_order(n, order): + """ + Reverse the bit order of an integer n + """ + assert is_power_of_two(order) + return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2) +``` + #### `compute_previous_slot` ```python @@ -423,11 +436,11 @@ def update_pending_votes(state: BeaconState, ] if True not in [c.confirmed for c in all_candidates]: # Requirement 2: >= 2/3 of balance attesting - participants = get_attesting_indices(state, data, pending_commitment.votes) + participants = get_attesting_indices(state, attestation.data, 
pending_commitment.votes) participants_balance = get_total_balance(state, participants) - full_committee = get_beacon_committee(state, data.slot, data.shard) + full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.shard) full_committee_balance = get_total_balance(state, full_committee) - if participants_balance * 2 > full_committee_balance: + if participants_balance * 3 > full_committee_balance * 2: pending_header.confirmed = True ``` From d3cb261a160f35b956c2d848b43e1993bf0c1dd5 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 16 Dec 2020 11:55:31 +0800 Subject: [PATCH 046/127] Delta minimum 1 --- specs/phase1/beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 91ce411bf..4e8f0d679 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -241,12 +241,12 @@ def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex ```python def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64, adjustment_quotient: uint64) -> Gwei: if shard_block_length > TARGET_SAMPLES_PER_BLOCK: - delta = (prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOCK) - // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient) + delta = max(1, prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOCK) + // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient) return min(prev_gasprice + delta, MAX_GASPRICE) else: - delta = (prev_gasprice * (TARGET_SAMPLES_PER_BLOCK - shard_block_length) - // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient) + delta = max(1, prev_gasprice * (TARGET_SAMPLES_PER_BLOCK - shard_block_length) + // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient) return max(prev_gasprice, MIN_GASPRICE + delta) - delta ``` From 315fe92f08fc7dcab4afb10f82f8f1404114ae29 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 16 Dec 2020 15:04:44 +0800 Subject: [PATCH 047/127] Added size check to 
include the degree check --- specs/phase1/beacon-chain.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 4e8f0d679..aa3d3e2c9 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -89,11 +89,11 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | -| `G2_ONE` | The G2 generator | +| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. | | `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | -These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. +These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK-1] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * 2) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. ### Gwei values @@ -458,10 +458,11 @@ def process_shard_header(state: BeaconState, compute_signing_root(header, get_domain(state, DOMAIN_SHARD_HEADER)), signed_header.signature ) - # Verify length of the header + # Verify length of the header, and simultaneously verify degree. 
+ r = hash(header.commitment.point) assert ( bls.Pairing(header.length_proof, SIZE_CHECK_POINTS[header.commitment.length]) == - bls.Pairing(header.commitment.point, G2_ONE) + bls.Pairing(header.commitment.point, bls.Add(bls.Multiply(G2_ONE, r), G2_SETUP[-header.commitment.length-1])) ) # Get the correct pending header list if compute_epoch_at_slot(header.slot) == get_current_epoch(state): @@ -484,6 +485,10 @@ def process_shard_header(state: BeaconState, )) ``` +The length-and-degree proof works as follows. For a block B with length `l` (so `l` nonzero values in `[0...MAX_SAMPLES_PER_BLOCK-1]`), the length proof is supposed to be `(B / Z) * (r + X**(len(SETUP)-l))`, where `Z` is the minimal polynomial that is zero over `[l...MAX_SAMPLES_PER_BLOCK-1]` (see `SIZE_CHECK_POINTS` above). The goal is to ensure that a proof can only be constructed if (i) `B / Z` is itself non-fractional, meaning that `B` is a multiple of `Z`, and (ii) `deg(B) < MAX_SAMPLES_PER_BLOCK` (the block is not oversized). + +This is done by making the proof be a random linear combination of `B / Z` and `(B / Z) * (X**(len(SETUP)-l)`. The length proof will have the degree of `(B / Z) * X**(len(SETUP)-l)`, so `deg(B) - (MAX_SAMPLES_PER_BLOCK - l) + len(SETUP) - l`, simplified to `deg(B) - MAX_SAMPLES_PER_BLOCK + len(SETUP)`. Because it's only possible to generate proofs for polynomials with degree `< len(SETUP)`, it's this only possible to generate the proof if this expression is less than `len(SETUP)`, meaning that `deg(B)` must be strictly less than `MAX_SAMPLES_PER_BLOCK`. 
+ ### Shard transition processing ### Epoch transition From 4348d3b255862993dfac23335404af1d62c12ba7 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Wed, 16 Dec 2020 14:14:21 +0000 Subject: [PATCH 048/127] Add data availability coding rate as a constant (makes it more explicit what's going on) --- specs/phase1/beacon-chain.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index aa3d3e2c9..b49fb00f3 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -76,6 +76,8 @@ We define the following Python custom types for type hinting and readability: | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | | `MAX_SHARD_HEADERS` | `MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD` | | | `PRIMITIVE_ROOT_OF_UNITY` | `5` | Primitive root of unity of the BLS12_381 (inner) modulus | +| `DATA_AVAILABILITY_INVERSE_CODING_RATE` | `2**1` (=2) | Factor by which samples are extended for data availability encoding | + ### Shard block configs @@ -93,7 +95,7 @@ We define the following Python custom types for type hinting and readability: | `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | -These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK-1] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * 2) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. 
+These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK-1] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * DATA_AVAILABILITY_INVERSE_CODING_RATE) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. ### Gwei values From 7d05c42cad82ac1319d5fb7d0e9ce0fbd6ec4844 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 18 Dec 2020 13:31:24 +0000 Subject: [PATCH 049/127] Improve/correct length checking --- specs/phase1/beacon-chain.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index b49fb00f3..99b8dec75 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -461,10 +461,9 @@ def process_shard_header(state: BeaconState, signed_header.signature ) # Verify length of the header, and simultaneously verify degree. - r = hash(header.commitment.point) assert ( bls.Pairing(header.length_proof, SIZE_CHECK_POINTS[header.commitment.length]) == - bls.Pairing(header.commitment.point, bls.Add(bls.Multiply(G2_ONE, r), G2_SETUP[-header.commitment.length-1])) + bls.Pairing(header.commitment.point, G2_SETUP[-header.commitment.length])) ) # Get the correct pending header list if compute_epoch_at_slot(header.slot) == get_current_epoch(state): @@ -487,9 +486,9 @@ def process_shard_header(state: BeaconState, )) ``` -The length-and-degree proof works as follows. For a block B with length `l` (so `l` nonzero values in `[0...MAX_SAMPLES_PER_BLOCK-1]`), the length proof is supposed to be `(B / Z) * (r + X**(len(SETUP)-l))`, where `Z` is the minimal polynomial that is zero over `[l...MAX_SAMPLES_PER_BLOCK-1]` (see `SIZE_CHECK_POINTS` above). 
The goal is to ensure that a proof can only be constructed if (i) `B / Z` is itself non-fractional, meaning that `B` is a multiple of `Z`, and (ii) `deg(B) < MAX_SAMPLES_PER_BLOCK` (the block is not oversized). +The length-and-degree proof works as follows. For a block `B` with length `l` (so `l` nonzero values in `[0...MAX_SAMPLES_PER_BLOCK - 1]`), the length proof is the commitment to the polynomial `(B(X) / Z(X)) * (X**(MAX_DEGREE + 1 - l))`, where `Z` is the minimal polynomial that is zero over `ROOT_OF_UNITY ** [l...MAX_SAMPLES_PER_BLOCK - 1]` (see `SIZE_CHECK_POINTS` above) and `MAX_DEGREE` the the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if (i) `B / Z` is itself non-fractional, meaning that `B` is a multiple of `Z`, and (ii) `deg(B) < MAX_SAMPLES_PER_BLOCK` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). -This is done by making the proof be a random linear combination of `B / Z` and `(B / Z) * (X**(len(SETUP)-l)`. The length proof will have the degree of `(B / Z) * X**(len(SETUP)-l)`, so `deg(B) - (MAX_SAMPLES_PER_BLOCK - l) + len(SETUP) - l`, simplified to `deg(B) - MAX_SAMPLES_PER_BLOCK + len(SETUP)`. Because it's only possible to generate proofs for polynomials with degree `< len(SETUP)`, it's this only possible to generate the proof if this expression is less than `len(SETUP)`, meaning that `deg(B)` must be strictly less than `MAX_SAMPLES_PER_BLOCK`. +The length proof will have the degree of `(B(X) / Z(X)) * X**(MAX_DEGREE + 1 - l)`, so `deg(B) - (MAX_SAMPLES_PER_BLOCK - l) + MAX_DEGREE + 1 - l`, simplified to `deg(B) - MAX_SAMPLES_PER_BLOCK + MAX_DEGREE + 1`. 
Because it's only possible to commit to polynomials with degree `<= MAX_DEGREE`, it's only possible to generate the proof if this expression is less than or equal to `MAX_DEGREE`, meaning that `deg(B)` must be strictly less than `MAX_SAMPLES_PER_BLOCK`. ### Shard transition processing From 68844f2e9d8e0119f86f3cfab9fcaebb8fa7fb7e Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 18 Dec 2020 13:57:05 +0000 Subject: [PATCH 050/127] Increase maximum allowable gas price and restrict proposer selection so it is guaranteed to be covered --- specs/phase1/beacon-chain.md | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 99b8dec75..32851bdd7 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -101,7 +101,7 @@ These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_P | Name | Value | Unit | Description | | - | - | - | - | -| `MAX_GASPRICE` | `Gwei(2**24)` (= 16,777,216) | Gwei | Max gasprice charged for a TARGET-sized shard block | +| `MAX_GASPRICE` | `Gwei(2**33)` (= 8,589,934,592) | Gwei | Max gasprice charged for a TARGET-sized shard block | | `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for a TARGET-sized shard block | ### Time parameters @@ -309,6 +309,28 @@ def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) - ) ``` +#### `compute_proposer_index` + +Updated version to get a proposer index that will only allow proposers with a certain minimum balance, ensuring that the balance is always sufficient to cover gas costs. + +```python +def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32, min_effective_balance: GWei = GWei(0)) -> ValidatorIndex: + """ + Return from ``indices`` a random index sampled by effective balance. 
+ """ + assert len(indices) > 0 + MAX_RANDOM_BYTE = 2**8 - 1 + i = uint64(0) + total = uint64(len(indices)) + while True: + candidate_index = indices[compute_shuffled_index(i % total, total, seed)] + random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] + effective_balance = state.validators[candidate_index].effective_balance + if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte and effective_balance > min_effective_balance: + return candidate_index + i += 1 +``` + #### `get_shard_proposer_index` ```python @@ -318,8 +340,11 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard """ epoch = compute_epoch_at_slot(slot) committee = get_shard_committee(beacon_state, epoch, shard) - seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_COMMITTEE) + uint_to_bytes(slot)) - return compute_proposer_index(state, committee, seed) + seed = hash(get_seed(beacon_state, + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT + + return compute_proposer_index(state, committee, seed, + state.shard_gasprice * MAX_SAMPLES_PER_BLOCK // TARGET_SAMPLES_PER_BLOCK + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION) ``` #### `get_start_shard` From 0769b2ca6d8de6958560883b39f0b87c75e53126 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 18 Dec 2020 20:33:34 +0000 Subject: [PATCH 051/127] Fill in with zeroes only up to the next power of two, to reduce degrees of polynomials --- specs/phase1/beacon-chain.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 32851bdd7..b9d74105f 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -95,7 +95,7 @@ We define the following Python custom types for type hinting and readability: | `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // 
(MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | -These points are the G2-side Kate commitments to `product[a in i...MAX_SAMPLES_PER_BLOCK-1] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * DATA_AVAILABILITY_INVERSE_CODING_RATE) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N^2/log(N)) calculation using fast-linear-combinations in G2. +These points are the G2-side Kate commitments to `product[a in i...next_power_of_two(i)] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * DATA_AVAILABILITY_INVERSE_CODING_RATE) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N**2/log(N)) calculation using fast-linear-combinations in G2. ### Gwei values @@ -209,6 +209,13 @@ class PendingShardHeader(Container): ### Misc +#### `next_power_of_two` + +```python +def next_power_of_two(x): + return 2 ** ((x - 1).bit_length()) +``` + #### `reverse_bit_order` ```python @@ -511,9 +518,9 @@ def process_shard_header(state: BeaconState, )) ``` -The length-and-degree proof works as follows. For a block `B` with length `l` (so `l` nonzero values in `[0...MAX_SAMPLES_PER_BLOCK - 1]`), the length proof is the commitment to the polynomial `(B(X) / Z(X)) * (X**(MAX_DEGREE + 1 - l))`, where `Z` is the minimal polynomial that is zero over `ROOT_OF_UNITY ** [l...MAX_SAMPLES_PER_BLOCK - 1]` (see `SIZE_CHECK_POINTS` above) and `MAX_DEGREE` the the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. 
The goal is to ensure that a proof can only be constructed if (i) `B / Z` is itself non-fractional, meaning that `B` is a multiple of `Z`, and (ii) `deg(B) < MAX_SAMPLES_PER_BLOCK` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). +The length-and-degree proof works as follows. For a block `B` with length `l` (so `l` nonzero values in `[0... - 1]`), the length proof is the commitment to the polynomial `(B(X) / Z(X)) * (X**(MAX_DEGREE + 1 - l))`, where `Z` is the minimal polynomial that is zero over `ROOT_OF_UNITY ** [l...next_power_of_two(l) - 1]` (see `SIZE_CHECK_POINTS` above) and `MAX_DEGREE` the the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if (i) `B / Z` is itself non-fractional, meaning that `B` is a multiple of `Z`, and (ii) `deg(B) < next_power_of_two(l)` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). -The length proof will have the degree of `(B(X) / Z(X)) * X**(MAX_DEGREE + 1 - l)`, so `deg(B) - (MAX_SAMPLES_PER_BLOCK - l) + MAX_DEGREE + 1 - l`, simplified to `deg(B) - MAX_SAMPLES_PER_BLOCK + MAX_DEGREE + 1`. Because it's only possible to commit to polynomials with degree `<= MAX_DEGREE`, it's only possible to generate the proof if this expression is less than or equal to `MAX_DEGREE`, meaning that `deg(B)` must be strictly less than `MAX_SAMPLES_PER_BLOCK`. +The length proof will have the degree of `(B(X) / Z(X)) * X**(MAX_DEGREE + 1 - l)`, so `deg(B) - (next_power_of_two(l) - l) + MAX_DEGREE + 1 - l`, simplified to `deg(B) - next_power_of_two(l) + MAX_DEGREE + 1`. Because it's only possible to commit to polynomials with degree `<= MAX_DEGREE`, it's only possible to generate the proof if this expression is less than or equal to `MAX_DEGREE`, meaning that `deg(B)` must be strictly less than `next_power_of_two(l)`. 
### Shard transition processing From c674a27a25d00fbd66e5b5ff21c54f2c4fa7c885 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 15:57:18 +0000 Subject: [PATCH 052/127] Fix seed in get_shard_proposer_index --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index b9d74105f..230f83ed9 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -347,7 +347,7 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard """ epoch = compute_epoch_at_slot(slot) committee = get_shard_committee(beacon_state, epoch, shard) - seed = hash(get_seed(beacon_state, + seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot)) EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT return compute_proposer_index(state, committee, seed, From 6e249e8932de33f5e6612c52a9c3caff36f43de0 Mon Sep 17 00:00:00 2001 From: dankrad Date: Mon, 28 Dec 2020 16:01:47 +0000 Subject: [PATCH 053/127] Update specs/phase1/beacon-chain.md Co-authored-by: terence tsao --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 230f83ed9..f0a6e1209 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -168,7 +168,7 @@ class DataCommitment(Container): # Kate commitment to the data point: BLSCommitment # Length of the data in samples - length: length + length: uint64 ``` ### `ShardHeader` From 126d07cfb396fc329db2c644eca6679db2355dbb Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 16:03:20 +0000 Subject: [PATCH 054/127] MAX_COMMITTEE_SIZE -> MAX_VALIDATORS_PER_COMMITTEE --- specs/phase1/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index f0a6e1209..d33304108 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -513,7 +513,7 @@ def process_shard_header(state: BeaconState, shard=header.shard, commitment=header.commitment, root=header_root, - votes=Bitlist[MAX_COMMITTEE_SIZE]([0] * committee_length), + votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), confirmed=False )) ``` @@ -655,7 +655,7 @@ def reset_pending_headers(state: BeaconState): shard=shard, commitment=DataCommitment(), root=Root(), - votes=Bitlist[MAX_COMMITTEE_SIZE]([0] * committee_length), + votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), confirmed=False )) From f0fc9c6462fe6bf331ee5928221694706a602ebc Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 16:12:31 +0000 Subject: [PATCH 055/127] Add modulus --- specs/phase1/beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index d33304108..7737aa9ea 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -92,7 +92,8 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | | `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. 
| -| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | | +| `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) | +| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | These points are the G2-side Kate commitments to `product[a in i...next_power_of_two(i)] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * DATA_AVAILABILITY_INVERSE_CODING_RATE) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N**2/log(N)) calculation using fast-linear-combinations in G2. From 0408aa52cc255f5a58678bfd7261aa246d78d7c5 Mon Sep 17 00:00:00 2001 From: dankrad Date: Mon, 28 Dec 2020 16:14:05 +0000 Subject: [PATCH 056/127] Update specs/phase1/beacon-chain.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Radosław Kapka --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 7737aa9ea..ccfbed3db 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -191,7 +191,7 @@ class ShardHeader(Container): ```python class PendingShardHeader(Container): # Slot and shard that this header is intended for - slot: uint64 + slot: Slot shard: Shard # Kate commitment to the data commitment: BLSCommitment From 6660262f5d41b6ea053060f781fb886580aaa552 Mon Sep 17 00:00:00 2001 From: dankrad Date: Mon, 28 Dec 2020 16:15:07 +0000 Subject: [PATCH 057/127] Update specs/phase1/beacon-chain.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 
Radosław Kapka --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index ccfbed3db..930f103ed 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -53,7 +53,7 @@ ## Introduction -This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/r1XzqYIOv) and more broadly [here](https://arxiv.org/abs/1809.09044), using Kate commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. +This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD) and more broadly [here](https://arxiv.org/abs/1809.09044), using Kate commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. 
## Custom types From a260dbcf15d93d6ffe85c302893d57a511cc4d38 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 16:24:31 +0000 Subject: [PATCH 058/127] Kate -> KZG10 (better in formal contexts) --- specs/phase1/beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 930f103ed..d82ec2ac4 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -53,7 +53,7 @@ ## Introduction -This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD) and more broadly [here](https://arxiv.org/abs/1809.09044), using Kate commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. +This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD) and more broadly [here](https://arxiv.org/abs/1809.09044), using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. ## Custom types @@ -96,7 +96,7 @@ We define the following Python custom types for type hinting and readability: | `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | | `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | -These points are the G2-side Kate commitments to `product[a in i...next_power_of_two(i)] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * DATA_AVAILABILITY_INVERSE_CODING_RATE) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. 
They can be computed with a one-time O(N**2/log(N)) calculation using fast-linear-combinations in G2. +These points are the G2-side KZG10 commitments to `product[a in i...next_power_of_two(i)] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * DATA_AVAILABILITY_INVERSE_CODING_RATE) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N**2/log(N)) calculation using fast-linear-combinations in G2. ### Gwei values @@ -166,7 +166,7 @@ The following containers are new in Phase 1. ```python class DataCommitment(Container): - # Kate commitment to the data + # KZG10 commitment to the data point: BLSCommitment # Length of the data in samples length: uint64 @@ -193,7 +193,7 @@ class PendingShardHeader(Container): # Slot and shard that this header is intended for slot: Slot shard: Shard - # Kate commitment to the data + # KZG10 commitment to the data commitment: BLSCommitment # hash_tree_root of the ShardHeader (stored so that attestations # can be checked against it) From f44b7ffe4860df971495eed07749aa956da396f6 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 16:36:28 +0000 Subject: [PATCH 059/127] Change length proof to degree proof --- specs/phase1/beacon-chain.md | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index d82ec2ac4..e909769e6 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -94,9 +94,6 @@ We define the following Python custom types for type hinting and readability: | `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. 
| | `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) | | `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | -| `SIZE_CHECK_POINTS` | Type `List[G2, MAX_SAMPLES_PER_BLOCK + 1]`; TO BE COMPUTED | - -These points are the G2-side KZG10 commitments to `product[a in i...next_power_of_two(i)] (X ** POINTS_PER_SAMPLE - w ** (reverse_bit_order(a, MAX_SAMPLES_PER_BLOCK * DATA_AVAILABILITY_INVERSE_CODING_RATE) * POINTS_PER_SAMPLE))` for each `i` in `[0...MAX_SAMPLES_PER_BLOCK]`, where `w = ROOT_OF_UNITY`. They are used to verify block size proofs. They can be computed with a one-time O(N**2/log(N)) calculation using fast-linear-combinations in G2. ### Gwei values @@ -181,9 +178,8 @@ class ShardHeader(Container): shard: Shard # The actual data commitment commitment: DataCommitment - # Proof of the length (more precisely, proof that values at - # positions >= the length all equal zero) - length_proof: BLSCommitment + # Proof that the degree < commitment.length + degree_proof: BLSCommitment ``` ### `PendingShardHeader` @@ -495,7 +491,7 @@ def process_shard_header(state: BeaconState, ) # Verify length of the header, and simultaneously verify degree. assert ( - bls.Pairing(header.length_proof, SIZE_CHECK_POINTS[header.commitment.length]) == + bls.Pairing(header.degree_proof, G2_SETUP[0]) == bls.Pairing(header.commitment.point, G2_SETUP[-header.commitment.length])) ) # Get the correct pending header list @@ -519,9 +515,7 @@ def process_shard_header(state: BeaconState, )) ``` -The length-and-degree proof works as follows. For a block `B` with length `l` (so `l` nonzero values in `[0... 
- 1]`), the length proof is the commitment to the polynomial `(B(X) / Z(X)) * (X**(MAX_DEGREE + 1 - l))`, where `Z` is the minimal polynomial that is zero over `ROOT_OF_UNITY ** [l...next_power_of_two(l) - 1]` (see `SIZE_CHECK_POINTS` above) and `MAX_DEGREE` the the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if (i) `B / Z` is itself non-fractional, meaning that `B` is a multiple of `Z`, and (ii) `deg(B) < next_power_of_two(l)` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). -The length proof will have the degree of `(B(X) / Z(X)) * X**(MAX_DEGREE + 1 - l)`, so `deg(B) - (next_power_of_two(l) - l) + MAX_DEGREE + 1 - l`, simplified to `deg(B) - next_power_of_two(l) + MAX_DEGREE + 1`. Because it's only possible to commit to polynomials with degree `<= MAX_DEGREE`, it's only possible to generate the proof if this expression is less than or equal to `MAX_DEGREE`, meaning that `deg(B)` must be strictly less than `next_power_of_two(l)`. +The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 - l)`, where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are no hidden higher-order terms in the polynomial, which would thwart reconstruction).
### Shard transition processing From 41f97162d6a0562c0ef13e036b19fe2bea44660f Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 16:53:50 +0000 Subject: [PATCH 060/127] most_recent_confirmed_commitments -> grandparent_epoch_confirmed_commitments --- specs/phase1/beacon-chain.md | 6 +++--- specs/phase1/fork-choice.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index e909769e6..44df6d16a 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -150,7 +150,7 @@ class BeaconState(phase0.BeaconState): # New fields current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] - most_recent_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] + grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] shard_gasprice: uint64 current_epoch_start_shard: Shard ``` @@ -591,10 +591,10 @@ def process_pending_headers(state: BeaconState): candidates[winning_index].confirmed = True for slot in range(SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): - state.most_recent_confirmed_commitments[shard][slot] = DataCommitment() + state.grandparent_epoch_confirmed_commitments[shard][slot] = DataCommitment() for c in state.previous_epoch_pending_shard_headers: if c.confirmed: - state.most_recent_confirmed_commitments[c.shard][c.slot % SLOTS_PER_EPOCH] = c.commitment + state.grandparent_epoch_confirmed_commitments[c.shard][c.slot % SLOTS_PER_EPOCH] = c.commitment ``` ```python diff --git a/specs/phase1/fork-choice.md b/specs/phase1/fork-choice.md index 858a0c872..e9c9f4f02 100644 --- a/specs/phase1/fork-choice.md +++ b/specs/phase1/fork-choice.md @@ -27,7 +27,7 @@ def get_new_dependencies(state: BeaconState) -> Set[DataCommitment]: # Already confirmed during
previous epoch [c.commitment for c in state.previous_epoch_pending_headers if c.confirmed] + # Confirmed in the epoch before the previous - [c for c in shard for shard in state.most_recent_confirmed_commitments if c != DataCommitment()] + [c for c in shard for shard in state.grandparent_epoch_confirmed_commitments if c != DataCommitment()] ) ``` From 7fc34c801deeeece6b32fc0ed6369ca120e52058 Mon Sep 17 00:00:00 2001 From: dankrad Date: Mon, 28 Dec 2020 16:55:06 +0000 Subject: [PATCH 061/127] Update specs/phase1/beacon-chain.md Co-authored-by: terence tsao --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 44df6d16a..66026d9a9 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -193,7 +193,7 @@ class PendingShardHeader(Container): commitment: BLSCommitment # hash_tree_root of the ShardHeader (stored so that attestations # can be checked against it) - root: Hash + root: Root # Length of the data in samples length: uint64 # Who voted for the header From f216000815f5ba4cbecc0526bcd84d982b4f9983 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 16:56:43 +0000 Subject: [PATCH 062/127] MAX_COMMITTEE_SIZE -> MAX_VALIDATORS_PER_COMMITTEE (missed one) --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 66026d9a9..211c632a9 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -197,7 +197,7 @@ class PendingShardHeader(Container): # Length of the data in samples length: uint64 # Who voted for the header - votes: Bitlist[MAX_COMMITTEE_SIZE] + votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] # Has this header been confirmed? 
confirmed: bool ``` From eccae0aa896563c158ac520c1d6f6cde3b1c0a56 Mon Sep 17 00:00:00 2001 From: dankrad Date: Mon, 28 Dec 2020 16:57:44 +0000 Subject: [PATCH 063/127] Update specs/phase1/beacon-chain.md Co-authored-by: terence tsao --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 211c632a9..d539fabc8 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -148,8 +148,8 @@ class BeaconState(phase0.BeaconState): previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # New fields - current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] + current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] shard_gasprice: uint64 current_epoch_start_shard: Shard From 29a5d4c8367a1452a76ae195367f9cf35eeff3cd Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 17:21:46 +0000 Subject: [PATCH 064/127] BLSCommitment -> DataCommitment --- specs/phase1/beacon-chain.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index d539fabc8..7fc216a36 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -190,12 +190,10 @@ class PendingShardHeader(Container): slot: Slot shard: Shard # KZG10 commitment to the data - commitment: BLSCommitment + commitment: DataCommitment # hash_tree_root of the ShardHeader (stored so that attestations # can be checked against it) root: Root - # Length of the data in samples - length: uint64 # Who voted 
for the header votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] # Has this header been confirmed? From fc4dad6a1360945058b28bfa522ade07ee6259d5 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 17:38:09 +0000 Subject: [PATCH 065/127] Fix degree proof for length 0 (degree -inf) --- specs/phase1/beacon-chain.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 7fc216a36..b0640a0c9 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -91,7 +91,8 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | -| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. | +| `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. | +| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` | | `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) | | `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | @@ -487,7 +488,9 @@ def process_shard_header(state: BeaconState, compute_signing_root(header, get_domain(state, DOMAIN_SHARD_HEADER)), signed_header.signature ) - # Verify length of the header, and simultaneously verify degree. + # Verify the length by verifying the degree. 
+ if header.commitment.length == 0: + assert header.degree_proof == G1_SETUP[0] assert ( bls.Pairing(header.degree_proof, G2_SETUP[0]) == bls.Pairing(header.commitment.point, G2_SETUP[-header.commitment.length])) From ac0686de045c78376d9b2f2d193890d3a8c77672 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 17:39:49 +0000 Subject: [PATCH 066/127] Refactor loop --- specs/phase1/beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index b0640a0c9..5244dbe99 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -567,10 +567,10 @@ def process_pending_headers(state: BeaconState): c for c in state.previous_epoch_pending_shard_headers if (c.slot, c.shard) == (slot, shard) ] + # The entire committee (and its balance) + full_committee = get_beacon_committee(state, slot, shard) + full_committee_balance = get_total_balance(state, full_committee) if True not in [c.confirmed for c in candidates]: - # The entire committee (and its balance) - full_committee = get_beacon_committee(state, slot, shard) - full_committee_balance = get_total_balance(state, full_committee) # The set of voters who voted for each header # (and their total balances) voting_sets = [ From 0af9e25e6b501ad1fa2028b760983d5c4ea85080 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 28 Dec 2020 17:47:04 +0000 Subject: [PATCH 067/127] Revert hackmd link -- original link was good, just accidentally overwritten --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 5244dbe99..9de60c645 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -53,7 +53,7 @@ ## Introduction -This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas 
[here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD) and more broadly [here](https://arxiv.org/abs/1809.09044), using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. +This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044), using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. ## Custom types From ca6323866dd518958b7e8f52e148a3d37515f160 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 31 Dec 2020 09:35:27 +0800 Subject: [PATCH 068/127] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Radosław Kapka Co-authored-by: dankrad Co-authored-by: terence tsao --- specs/phase1/beacon-chain.md | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 9de60c645..1607a4e41 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -114,6 +114,7 @@ We define the following Python custom types for type hinting and readability: | Name | Value | | - | - | | `DOMAIN_SHARD_HEADER` | `DomainType('0x80000000')` | +| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | ## Updated containers @@ -424,12 +425,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: phase0.process_attestation(state, attestation) - update_pending_votes( - state=state, - attestation: Attestation, - root=, - aggregation_bits=attestation.aggregation_bits - ) + update_pending_votes(state, attestation) ``` #### 
`update_pending_votes` @@ -447,7 +443,7 @@ def update_pending_votes(state: BeaconState, if header.root == attestation.data.shard_header_root: pending_header = header assert pending_header is not None - assert pending_header.slot == attestation.data.slot + 1 + assert pending_header.slot == attestation.data.slot assert pending_header.shard == compute_shard_from_committee_index( state, attestation.data.index, @@ -466,9 +462,9 @@ def update_pending_votes(state: BeaconState, ] if True not in [c.confirmed for c in all_candidates]: # Requirement 2: >= 2/3 of balance attesting - participants = get_attesting_indices(state, attestationg.data, pending_commitment.votes) + participants = get_attesting_indices(state, attestation.data, pending_commitment.votes) participants_balance = get_total_balance(state, participants) - full_committee = get_beacon_committee(state, attestationg.data.slot, attestationg.data.shard) + full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) full_committee_balance = get_total_balance(state, full_committee) if participants_balance * 3 > full_committee_balance * 2: pending_header.confirmed = True @@ -478,7 +474,7 @@ def update_pending_votes(state: BeaconState, ```python def process_shard_header(state: BeaconState, - signed_header: Signed[ShardDataHeader]) -> None: + signed_header: Signed[ShardHeader]) -> None: header = signed_header.message header_root = hash_tree_root(header) # Verify signature @@ -499,6 +495,7 @@ def process_shard_header(state: BeaconState, if compute_epoch_at_slot(header.slot) == get_current_epoch(state): pending_headers = state.current_epoch_pending_shard_headers else: + assert compute_epoch_at_slot(header.slot) == get_previous_epoch(state): pending_headers = state.previous_epoch_pending_shard_headers # Check that this header is not yet in the pending list @@ -561,7 +558,7 @@ def process_epoch(state: BeaconState) -> None: def process_pending_headers(state: BeaconState): for slot in 
range(SLOTS_PER_EPOCH): - for shard in range(SHARD_COUNT): + for shard in range(get_active_shard_count(state)): # Pending headers for this (slot, shard) combo candidates = [ c for c in state.previous_epoch_pending_shard_headers if @@ -601,7 +598,7 @@ def process_pending_headers(state: BeaconState): ```python def charge_confirmed_header_fees(state: BeaconState) -> None: new_gasprice = state.shard_gasprice - adjustment_quotient = get_active_shard_count(state) * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT + adjustment_quotient = get_active_shard_count(state, get_current_epoch(state)) * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT for slot in range(SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): confirmed_candidates = [ From 3e6baf1ef1e081ed78f8c56267ff3ba07624f27d Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 31 Dec 2020 09:56:29 +0800 Subject: [PATCH 069/127] Fixed index vs shard and beacon committee getting --- specs/phase1/beacon-chain.md | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 1607a4e41..e59696806 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -234,14 +234,6 @@ def compute_previous_slot(slot: Slot) -> Slot: return Slot(0) ``` -#### `compute_shard_from_committee_index` - -```python -def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex, slot: Slot) -> Shard: - active_shards = get_active_shard_count(state) - return Shard((index + get_start_shard(state, slot)) % active_shards) -``` - #### `compute_updated_gasprice` ```python @@ -377,6 +369,22 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard: return Shard(shard) ``` +#### `compute_shard_from_committee_index` + +```python +def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Shard: + active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) 
+ return Shard((index + get_start_shard(state, slot)) % active_shards) +``` + +#### `compute_committee_index_from_shard` + +```python +def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex: + active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) + return CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) +``` + ### Block processing @@ -446,8 +454,8 @@ def update_pending_votes(state: BeaconState, assert pending_header.slot == attestation.data.slot assert pending_header.shard == compute_shard_from_committee_index( state, + attestation.data.slot, attestation.data.index, - attestation.data.slot ) pending_header.votes = bitwise_or( pending_header.votes, @@ -502,7 +510,8 @@ def process_shard_header(state: BeaconState, for pending_header in pending_headers: assert header_root != pending_header.root # Include it in the pending list - committee_length = len(get_beacon_committee(state, header.slot, header.shard)) + index = compute_committee_index_from_shard(state, header.slot, header.shard) + committee_length = len(get_beacon_committee(state, header.slot, index)) pending_headers.append(PendingShardHeader( slot=header.slot, shard=header.shard, @@ -626,7 +635,7 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: def reset_pending_headers(state: BeaconState): state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers shards = [ - compute_shard_from_committee_index(state, index, slot) + compute_shard_from_committee_index(state, slot, index) for i in range() state, attestation.data.index, @@ -637,7 +646,7 @@ def reset_pending_headers(state: BeaconState): # (default to vote for if no shard header available) for slot in range(SLOTS_IN_EPOCH): for index in range(get_committee_count_per_slot(get_current_epoch(state))): - shard = compute_shard_from_committee_index(state, index, slot) + shard = compute_shard_from_committee_index(state, slot, 
index) committee_length = len(get_beacon_committee( state, header.slot, From 1192158848c622011f7b3fe758f076ac90e40457 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 4 Jan 2021 11:17:30 -0700 Subject: [PATCH 070/127] minor cleanups/fixes to shard data avail PR --- specs/phase1/beacon-chain.md | 252 +++++++++++++++++++---------------- 1 file changed, 138 insertions(+), 114 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index e59696806..8c18c682d 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -23,20 +23,24 @@ - [New containers](#new-containers) - [`DataCommitment`](#datacommitment) - [`ShardHeader`](#shardheader) + - [`SignedShardHeader`](#signedshardheader) - [`PendingShardHeader`](#pendingshardheader) - [Helper functions](#helper-functions) - [Misc](#misc-1) + - [`next_power_of_two`](#next_power_of_two) + - [`reverse_bit_order`](#reverse_bit_order) - [`compute_previous_slot`](#compute_previous_slot) - - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index) - [`compute_updated_gasprice`](#compute_updated_gasprice) - [`compute_committee_source_epoch`](#compute_committee_source_epoch) - [Beacon state accessors](#beacon-state-accessors) - [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot) - [`get_active_shard_count`](#get_active_shard_count) - [`get_shard_committee`](#get_shard_committee) + - [`compute_proposer_index`](#compute_proposer_index) - [`get_shard_proposer_index`](#get_shard_proposer_index) - [`get_start_shard`](#get_start_shard) - - [Predicates](#predicates) + - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index) + - [`compute_committee_index_from_shard`](#compute_committee_index_from_shard) - [Block processing](#block-processing) - [Operations](#operations) - [New Attestation processing](#new-attestation-processing) @@ -139,7 +143,7 @@ class AttestationData(Container): ```python class 
BeaconBlock(phase0.BeaconBlock): - shard_headers: List[Signed[ShardHeader], MAX_SHARD_HEADERS] + shard_headers: List[SignedShardHeader, MAX_SHARD_HEADERS] ``` ### `BeaconState` @@ -184,6 +188,14 @@ class ShardHeader(Container): degree_proof: BLSCommitment ``` +### `SignedShardHeader` + +```python +class SignedShardHeader(Container): + message: ShardHeader + signature: BLSSignature +``` + ### `PendingShardHeader` ```python @@ -193,8 +205,7 @@ class PendingShardHeader(Container): shard: Shard # KZG10 commitment to the data commitment: DataCommitment - # hash_tree_root of the ShardHeader (stored so that attestations - # can be checked against it) + # hash_tree_root of the ShardHeader (stored so that attestations can be checked against it) root: Root # Who voted for the header votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] @@ -307,10 +318,14 @@ def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) - #### `compute_proposer_index` -Updated version to get a proposer index that will only allow proposers with a certain minimum balance, ensuring that the balance is always sufficient to cover gas costs. +Updated version to get a proposer index that will only allow proposers with a certain minimum balance, +ensuring that the balance is always sufficient to cover gas costs. ```python -def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32, min_effective_balance: GWei = GWei(0)) -> ValidatorIndex: +def compute_proposer_index(beacon_state: BeaconState, + indices: Sequence[ValidatorIndex], + seed: Bytes32, + min_effective_balance: GWei = GWei(0)) -> ValidatorIndex: """ Return from ``indices`` a random index sampled by effective balance. 
""" @@ -321,8 +336,10 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex] while True: candidate_index = indices[compute_shuffled_index(i % total, total, seed)] random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] - effective_balance = state.validators[candidate_index].effective_balance - if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte and effective_balance > min_effective_balance: + effective_balance = beacon_state.validators[candidate_index].effective_balance + if effective_balance <= min_effective_balance: + continue + if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: return candidate_index i += 1 ``` @@ -336,11 +353,18 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard """ epoch = compute_epoch_at_slot(slot) committee = get_shard_committee(beacon_state, epoch, shard) - seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot)) - EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT + seed = hash(get_seed(beacon_state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(beacon_state.slot)) - return compute_proposer_index(state, committee, seed, - state.shard_gasprice * MAX_SAMPLES_PER_BLOCK // TARGET_SAMPLES_PER_BLOCK + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION) + # Proposer must have sufficient balance to pay for worst case fee burn + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = ( + (EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT) + * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT + ) + min_effective_balance = ( + beacon_state.shard_gasprice * MAX_SAMPLES_PER_BLOCK // TARGET_SAMPLES_PER_BLOCK + + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION + ) + return compute_proposer_index(beacon_state, committee, seed, min_effective_balance) ``` #### `get_start_shard` @@ -358,7 +382,6 @@ def 
get_start_shard(state: BeaconState, slot: Slot) -> Shard: committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot))) active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot))) shard = (shard + committee_count) % active_shard_count - return Shard(shard) elif slot < current_epoch_start_slot: # Previous epoch for _slot in list(range(slot, current_epoch_start_slot))[::-1]: @@ -410,20 +433,16 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.proposer_slashings, process_proposer_slashing) for_ops(body.attester_slashings, process_attester_slashing) + # Limit is dynamic based on active shard count + assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state)) + for_ops(body.shard_headers, process_shard_header) # New attestation processing for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) - # Limit is dynamic based on active shard count - assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state)) - for_ops(body.shard_headers, process_shard_header) # See custody game spec. 
process_custody_game_operations(state, body) - - process_shard_transitions(state, body.shard_transitions, body.attestations) - - # TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs) ``` ### New Attestation processing @@ -441,11 +460,11 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ```python def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: - if compute_epoch_at_slot(slot) == get_current_epoch(state): + # Find and update the PendingShardHeader object, invalid block if pending header not in state + if compute_epoch_at_slot(attestation.data.slot) == get_current_epoch(state): pending_headers = state.current_epoch_pending_shard_headers else: pending_headers = state.previous_epoch_pending_shard_headers - # Create or update the PendingShardHeader object pending_header = None for header in pending_headers: if header.root == attestation.data.shard_header_root: @@ -457,10 +476,8 @@ def update_pending_votes(state: BeaconState, attestation.data.slot, attestation.data.index, ) - pending_header.votes = bitwise_or( - pending_header.votes, - attestation.aggregation_bits - ) + for i in range(len(pending_header.votes)): + pending_header.votes[i] = pending_header.votes[i] or attestation.aggregation_bits[i] # Check if the PendingShardHeader is eligible for expedited confirmation # Requirement 1: nothing else confirmed @@ -468,47 +485,49 @@ def update_pending_votes(state: BeaconState, c for c in pending_headers if (c.slot, c.shard) == (pending_header.slot, pending_header.shard) ] - if True not in [c.confirmed for c in all_candidates]: - # Requirement 2: >= 2/3 of balance attesting - participants = get_attesting_indices(state, attestation.data, pending_commitment.votes) - participants_balance = get_total_balance(state, participants) - full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) - full_committee_balance = get_total_balance(state, full_committee) 
- if participants_balance * 3 > full_committee_balance * 2: - pending_header.confirmed = True + if True in [c.confirmed for c in all_candidates]: + return + + # Requirement 2: >= 2/3 of balance attesting + participants = get_attesting_indices(state, attestation.data, pending_commitment.votes) + participants_balance = get_total_balance(state, participants) + full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) + full_committee_balance = get_total_balance(state, full_committee) + if participants_balance * 3 >= full_committee_balance * 2: + pending_header.confirmed = True ``` #### `process_shard_header` ```python def process_shard_header(state: BeaconState, - signed_header: Signed[ShardHeader]) -> None: + signed_header: SignedShardHeader) -> None: header = signed_header.message header_root = hash_tree_root(header) + assert compute_epoch_at_slot(header.slot) in [get_previous_epoch(state), get_current_epoch(state)] + # Verify signature signer_index = get_shard_proposer_index(state, header.slot, header.shard) - assert bls.Verify( - state.validators[signer_index].pubkey, - compute_signing_root(header, get_domain(state, DOMAIN_SHARD_HEADER)), - signed_header.signature - ) + signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_HEADER)) + assert bls.Verify(state.validators[signer_index].pubkey, signing_root, signed_header.signature) + # Verify the length by verifying the degree. 
if header.commitment.length == 0: assert header.degree_proof == G1_SETUP[0] assert ( - bls.Pairing(header.degree_proof, G2_SETUP[0]) == - bls.Pairing(header.commitment.point, G2_SETUP[-header.commitment.length])) + bls.Pairing(header.degree_proof, G2_SETUP[0]) + == bls.Pairing(header.commitment.point, G2_SETUP[-header.commitment.length])) ) + # Get the correct pending header list if compute_epoch_at_slot(header.slot) == get_current_epoch(state): pending_headers = state.current_epoch_pending_shard_headers else: - assert compute_epoch_at_slot(header.slot) == get_previous_epoch(state): pending_headers = state.previous_epoch_pending_shard_headers - + # Check that this header is not yet in the pending list - for pending_header in pending_headers: - assert header_root != pending_header.root + assert header_root not in [pending_header.root for pending_header in pending_headers] + # Include it in the pending list index = compute_committee_index_from_shard(state, header.slot, header.shard) committee_length = len(get_beacon_committee(state, header.slot, index)) @@ -518,7 +537,7 @@ def process_shard_header(state: BeaconState, commitment=header.commitment, root=header_root, votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), - confirmed=False + confirmed=False, )) ``` @@ -565,102 +584,107 @@ def process_epoch(state: BeaconState) -> None: ```python -def process_pending_headers(state: BeaconState): - for slot in range(SLOTS_PER_EPOCH): +def process_pending_headers(state: BeaconState) -> None: + # Pending header processing applies to the previous epoch. + # Skip if `GENESIS_EPOCH` because no prior epoch to process. 
+ if get_current_epoch(state) == GENESIS_EPOCH: + return + + previous_epoch_start_slot = compute_start_slot_at_epoch(get_previous_epoch(state)) + for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): for shard in range(get_active_shard_count(state)): # Pending headers for this (slot, shard) combo candidates = [ - c for c in state.previous_epoch_pending_shard_headers if - (c.slot, c.shard) == (slot, shard) + c for c in state.previous_epoch_pending_shard_headers + if (c.slot, c.shard) == (slot, shard) ] # The entire committee (and its balance) full_committee = get_beacon_committee(state, slot, shard) full_committee_balance = get_total_balance(state, full_committee) - if True not in [c.confirmed for c in candidates]: - # The set of voters who voted for each header - # (and their total balances) - voting_sets = [ - [v for i, v in enumerate(full_committee) if c.votes[i]] - for c in candidates - ] - voting_balances = [ - get_total_balance(state, voters) - for voters in voting_sets - ] - # Get the index with the most total balance voting for them. - # NOTE: if two choices get exactly the same voting balance, - # the candidate earlier in the list wins - if max(voting_balances) > 0: - winning_index = voting_balances.index(max(voting_balances)) - else: - # If no votes, zero wins - winning_index = [c.root for c in candidates].index(Root()) - candidates[winning_index].confirmed = True - for slot in range(SLOTS_PER_EPOCH): + # If any candidates already confirmed, skip + if True in [c.confirmed for c in candidates]: + continue + + # The set of voters who voted for each header (and their total balances) + voting_sets = [ + [v for i, v in enumerate(full_committee) if c.votes[i]] + for c in candidates + ] + voting_balances = [ + get_total_balance(state, voters) + for voters in voting_sets + ] + # Get the index with the most total balance voting for them. 
+ # NOTE: if two choices get exactly the same voting balance, + # the candidate earlier in the list wins + if max(voting_balances) > 0: + winning_index = voting_balances.index(max(voting_balances)) + else: + # If no votes, zero wins + winning_index = [c.root for c in candidates].index(Root()) + candidates[winning_index].confirmed = True + for slot_index in range(SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): - state.grandparent_epoch_confirmed_commitments[shard][slot] = DataCommitment() - for c in state.previous_epoch_pending_shard_headers: - if c.confirmed: - state.grandparent_epoch_confirmed_commitments[c.shard][c.slot % SLOTS_PER_EPOCH] = c.commitment -``` + state.grandparent_epoch_confirmed_commitments[shard][slot_index] = DataCommitment() + confirmed_headers = [candidate in state.previous_epoch_pending_shard_headers if candidate.confirmed] + for header in confirmed_headers: + state.grandparent_epoch_confirmed_commitments[c.shard][c.slot % SLOTS_PER_EPOCH] = c.commitment +``` ```python def charge_confirmed_header_fees(state: BeaconState) -> None: new_gasprice = state.shard_gasprice - adjustment_quotient = get_active_shard_count(state, get_current_epoch(state)) * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT - for slot in range(SLOTS_PER_EPOCH): + adjustment_quotient = ( + get_active_shard_count(state, get_current_epoch(state)) + * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT + ) + previous_epoch_start_slot = compute_start_slot_at_epoch(get_previous_epoch(state)) + for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): for shard in range(SHARD_COUNT): confirmed_candidates = [ - c for c in state.previous_epoch_pending_shard_headers if - (c.slot, c.shard, c.confirmed) == (slot, shard, True) + c for c in state.previous_epoch_pending_shard_headers + if (c.slot, c.shard, c.confirmed) == (slot, shard, True) ] - if confirmed_candidates: - candidate = confirmed_candidates[0] - # Charge EIP 1559 fee - proposer = 
get_shard_proposer(state, slot, shard) - fee = ( - (state.shard_gasprice * candidates[i].commitment.length) // - TARGET_SAMPLES_PER_BLOCK - ) - decrease_balance(state, proposer, fee) - new_gasprice = compute_updated_gasprice( - new_gasprice, - candidates[i].commitment.length, - adjustment_quotient - ) + if not any(confirmed_candidates): + continue + candidate = confirmed_candidates[0] + + # Charge EIP 1559 fee + proposer = get_shard_proposer(state, slot, shard) + fee = ( + (state.shard_gasprice * candidate.commitment.length) + // TARGET_SAMPLES_PER_BLOCK + ) + decrease_balance(state, proposer, fee) + + # Track updated gas price + new_gasprice = compute_updated_gasprice( + new_gasprice, + candidate.commitment.length, + adjustment_quotient, + ) state.shard_gasprice = new_gasprice ``` ```python def reset_pending_headers(state: BeaconState): state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers - shards = [ - compute_shard_from_committee_index(state, slot, index) - for i in range() - state, - attestation.data.index, - attestation.data.slot - ) state.current_epoch_pending_shard_headers = [] - # Add dummy "empty" PendingAttestations - # (default to vote for if no shard header available) - for slot in range(SLOTS_IN_EPOCH): - for index in range(get_committee_count_per_slot(get_current_epoch(state))): + # Add dummy "empty" PendingShardHeader (default vote for if no shard header available) + next_epoch = get_current_epoch(state) + 1 + next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch) + for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_IN_EPOCH): + for index in range(get_committee_count_per_slot(next_epoch) shard = compute_shard_from_committee_index(state, slot, index) - committee_length = len(get_beacon_committee( - state, - header.slot, - header.shard - )) + committee_length = len(get_beacon_committee(state, slot, shard)) state.current_epoch_pending_shard_headers.append(PendingShardHeader( slot=slot, 
shard=shard, commitment=DataCommitment(), root=Root(), votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), - confirmed=False + confirmed=False, )) - ``` #### Custody game updates From be93b03cc8737ecab244e0a46e3096fff7aaaa07 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Mar 2021 22:41:35 +0100 Subject: [PATCH 071/127] pending_commitment.votes -> pending_header.votes --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 8c18c682d..8600fd6ae 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -489,7 +489,7 @@ def update_pending_votes(state: BeaconState, return # Requirement 2: >= 2/3 of balance attesting - participants = get_attesting_indices(state, attestation.data, pending_commitment.votes) + participants = get_attesting_indices(state, attestation.data, pending_header.votes) participants_balance = get_total_balance(state, participants) full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) full_committee_balance = get_total_balance(state, full_committee) From 304e87a98c96bfc1e141b0cf905953e0f911a472 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 11 Jan 2021 07:59:29 -0700 Subject: [PATCH 072/127] typo --- specs/phase1/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 8600fd6ae..37b5c199e 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -541,7 +541,7 @@ def process_shard_header(state: BeaconState, )) ``` -The degree proof works as follows. 
For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 - l)`, where `MAX_DEGREE` the the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). +The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 - l)`, where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). 
### Shard transition processing From be91e59823f4544207a72447f2fcccd2fa415843 Mon Sep 17 00:00:00 2001 From: vbuterin Date: Wed, 9 Dec 2020 15:29:21 +0800 Subject: [PATCH 073/127] DAS phase 1 --- specs/phase1/beacon-chain.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 37b5c199e..2b821735a 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -47,17 +47,18 @@ - [Updated `process_attestation`](#updated-process_attestation) - [`update_pending_votes`](#update_pending_votes) - [`process_shard_header`](#process_shard_header) - - [Shard transition processing](#shard-transition-processing) - [Epoch transition](#epoch-transition) - [Pending headers](#pending-headers) - - [Phase 1 final updates](#phase-1-final-updates) - [Custody game updates](#custody-game-updates) ## Introduction -This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044), using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. +This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, +based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044), +using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design. 
+ ## Custom types @@ -143,6 +144,7 @@ class AttestationData(Container): ```python class BeaconBlock(phase0.BeaconBlock): + # insert phase 0 fields shard_headers: List[SignedShardHeader, MAX_SHARD_HEADERS] ``` @@ -458,8 +460,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: #### `update_pending_votes` ```python -def update_pending_votes(state: BeaconState, - attestation: Attestation) -> None: +def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: # Find and update the PendingShardHeader object, invalid block if pending header not in state if compute_epoch_at_slot(attestation.data.slot) == get_current_epoch(state): pending_headers = state.current_epoch_pending_shard_headers @@ -541,9 +542,10 @@ def process_shard_header(state: BeaconState, )) ``` -The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 - l)`, where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). - -### Shard transition processing +The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), +the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 - l)`, +where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. +The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). 
### Epoch transition @@ -667,7 +669,7 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: ``` ```python -def reset_pending_headers(state: BeaconState): +def reset_pending_headers(state: BeaconState) -> None: state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers state.current_epoch_pending_shard_headers = [] # Add dummy "empty" PendingShardHeader (default vote for if no shard header available) From 6f0b613f0838f061a78d8468e1b10f9e55088db0 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 1 Jan 2021 16:51:24 +0100 Subject: [PATCH 074/127] work in progress DAS network + validator spec --- specs/phase1/beacon-chain.md | 3 +- specs/phase1/data-availability-sampling.md | 65 +++++++++ specs/phase1/p2p-das.md | 162 +++++++++++++++++++++ specs/phase1/validator.md | 43 ++++++ 4 files changed, 272 insertions(+), 1 deletion(-) create mode 100644 specs/phase1/data-availability-sampling.md create mode 100644 specs/phase1/p2p-das.md diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 2b821735a..6ef3cf1d7 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -68,6 +68,7 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `Shard` | `uint64` | A shard number | | `BLSCommitment` | `bytes48` | A G1 curve point | +| `BLSKateProof` | `bytes48` | A G1 curve point | ## Configuration @@ -187,7 +188,7 @@ class ShardHeader(Container): # The actual data commitment commitment: DataCommitment # Proof that the degree < commitment.length - degree_proof: BLSCommitment + degree_proof: BLSKateProof ``` ### `SignedShardHeader` diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/data-availability-sampling.md new file mode 100644 index 000000000..2ae5e697b --- /dev/null +++ b/specs/phase1/data-availability-sampling.md @@ -0,0 +1,65 @@ +# Ethereum 2.0 Phase 1 -- Data Availability Sampling + +**Notice**: This document is a work-in-progress 
for researchers and implementers. + +## Table of contents + + + + +## Custom types + +We define the following Python custom types for type hinting and readability: + +| Name | SSZ equivalent | Description | +| - | - | - | +| `SampleIndex` | `uint64` | A sample index, corresponding to chunk of extended data | +| `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` | + + +## New containers + +### DASSample + +```python +class DASSample(Container): + slot: Slot + shard: Shard + index: SampleIndex + proof: BLSKateProof + data: Vector[BLSPoint, POINTS_PER_SAMPLE] +``` + +## Helper functions + +```python +def recover_data(data: Sequence[Optional[Point]]) -> Sequence[Point]: + ... +``` + +## DAS functions + +```python +def extend_data(data: Sequence[Point]) -> Sequence[Point]: + ... +``` + +```python +def unextend_data(extended_data: Sequence[Point]) -> Sequence[Point]: + ... +``` + +```python +def sample_data(extended_data: Sequence[Point]) -> Sequence[DASSample]: + ... +``` + +```python +def verify_sample(sample: DASSample): + ... +``` + +```python +def reconstruct_extended_data(samples: Sequence[DASSample]) -> Sequence[Point]: + ... +``` diff --git a/specs/phase1/p2p-das.md b/specs/phase1/p2p-das.md new file mode 100644 index 000000000..6d31e121b --- /dev/null +++ b/specs/phase1/p2p-das.md @@ -0,0 +1,162 @@ +# Ethereum 2.0 Phase 1 -- Network specification for Data Availability Sampling + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + + +## Introduction + +For an introduction about DAS itself, see [the DAS section in the Phase 1 validator spec](./validator.md#data-availability-sampling). +This is not a pre-requisite for the network layer, but will give you valuable context. + +For sampling, all nodes need to query for `k` random samples each slot. + +*__TODO__: describe big picture of sampling workload size* + +This is a lot of work, and ideally happens at a low latency. 
+ +To achieve quick querying, the query model is changed to *push* the samples to listeners instead, using GossipSub. +The listeners then randomly rotate their subscriptions to keep queries unpredictable. +Except for a small subset of subscriptions, which will function as a backbone to keep topics more stable. + +Publishing can utilize the fan-out functionality in GossipSub, and is easier to split between nodes: +nodes on the horizontal networks can help by producing the same samples and fan-out publishing to their own peers. + +This push model also helps to obfuscate the original source of a message: +the listeners will not have to make individual queries to some identified source. + +The push model does not aim to serve "historical" queries (anything older than the most recent). +Historical queries are still required for the unhappy case, where messages are not pushed quick enough, +and missing samples are not reconstructed by other nodes on the horizontal subnet quick enough. + +The main challenge in supporting historical queries is to target the right nodes, +without concentrating too many requests on a single node, or breaking the network/consensus identity separation. + +## DAS Subnets + +On a high level, the push-model roles are divided into: +- Sources: create blobs of shard block data, and transformed into many tiny samples. +- Sinks: continuously look for samples + +At full operation, the network has one proposer, per shard, per slot. + +In the push-model, there are: +- *Vertical subnets*: Sinks can subscribe to indices of samples: there is a sample to subnet mapping. +- *Horizontal subnets*: Sources need to distribute samples to all vertical networks: they participate in a fanout layer. + +### Horizontal subnets + +The shift of the distribution responsibility to a proposer can only be achieved with amplification: +a regular proposer cannot reach every vertical subnet. 
+ +#### Publishing + +To publish their work, proposers already put the shard block as a whole on a shard-block subnet. + +The proposer can fan-out their work more aggressively, by using the fan-out functionality of GossipSub: +it may publish to all its peers on the subnet, instead of just those in its mesh. + +#### Horizontal propagation + +Peers on the horizontal subnet are expected to at least perform regular propagation of shard blocks, like how do would participate in any other topic. + +*Although this may be sufficient for testnets, expect parameter changes in the spec here.* + +#### Horizontal to vertical + +Nodes on this same subnet can replicate the sampling efficiently (including a proof for each sample), +and distribute it to any vertical networks that are available to them. + +Since the messages are content-addressed (instead of origin-stamped), +multiple publishers of the same samples on a vertical subnet do not hurt performance, +but actually improve it by shortcutting regular propagation on the vertical subnet, and thus lowering the latency to a sample. + + +### Vertical subnets + +Vertical subnets propagate the samples to every peer that is interested. +These interests are randomly sampled and rotate quickly: although not perfect, +sufficient to avoid any significant amount of nodes from being 100% predictable. + +As soon as a sample is missing after the expected propagation time window, +nodes can divert to the pull-model, or ultimately flag it as unavailable data. + +#### Slow rotation: Backbone + +To allow for subscriptions to rotate quickly and randomly, a backbone is formed to help onboard peers into other topics. + +This backbone is based on a pure function of the *node* identity and time: +- Nodes can be found *without additional discovery overhead*: + peers on a vertical topic can be found by searching the local peerstore for identities that hash to the desired topic(s). 
+- Nodes can be held accountable for contributing to the backbone: + peers that participate in DAS but are not active on the appropriate backbone topics can be scored down. + +A node should anticipate backbone topics to subscribe to based on their own identity. +These subscriptions rotate slowly, and with different offsets per node identity to avoid sudden network-wide rotations. + +```python +# TODO hash function: (node, time)->subnets +``` + +Backbone subscription work is outlined in the [DAS validator spec](./validator.md#data-availability-sampling) + +#### Quick Rotation: Sampling + +A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS validator spec](./validator.md#data-availability-sampling). +If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore for peers in the topic backbone. + +## DAS in the Gossip domain: Push + +### Topics and messages + +#### Horizontal subnets + + + +#### Vertical subnets + + + +## DAS in the Req-Resp domain: Pull + +To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain. + +This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../phase0/p2p-interface.md). + +Note that the Phase1 DAS networking uses a different protocol prefix: `/eth2/das/req` + +The result codes are extended with: +- 3: **ResourceUnavailable** -- when the request was valid but cannot be served at this point in time. + +TODO: unify with phase0? Lighthouse already defined this in their response codes enum. + +### Messages + +#### DASQuery + +**Protocol ID:** `/eth2/das/req/query/1/` + +Request Content: +``` +( + sample_index: SampleIndex +) +``` + +Response Content: +``` +( + DASSample +) +``` + +When the sample is: +- Available: respond with a `Success` result code, and the encoded sample. 
+- Expected to be available, but not: respond with a `ResourceUnavailable` result code. +- Not available, but never of interest to the node: respond with an `InvalidRequest` result code. + +When the node is part of the backbone and expected to have the sample, the validity of the request MUST be recognized with `Success` or `ResourceUnavailable`. diff --git a/specs/phase1/validator.md b/specs/phase1/validator.md index ccc877be0..dce794e1b 100644 --- a/specs/phase1/validator.md +++ b/specs/phase1/validator.md @@ -536,6 +536,49 @@ class SignedLightAggregateAndProof(Container): signature: BLSSignature ``` +## Data Availability Sampling + +### Gossip subscriptions to maintain + +#### Slow rotation: Backbone + +TODO + +#### Quick rotation: Sampling + +TODO + + +### DAS during network instability + +The GossipSub based retrieval of samples may not always work + +#### Waiting on missing samples + +Wait for the sample to re-broadcast. Someone may be slow with publishing, or someone else is able to do the work. + +Any node can do the following work to keep the network healthy: +- Common: Listen on a horizontal subnet, chunkify the block data in samples, and propagate the samples to vertical subnets. +- Extreme: Listen on enough vertical subnets, reconstruct the missing samples by recovery, and propagate the recovered samples. + +This is not a requirement, but should improve the network stability with little resources, and without any central party. + +#### Pulling missing samples + +The more realistic option, to execute when a sample is missing, is to query any node that is known to hold it. +Since *consensus identity is disconnected from network identity*, there is no direct way to contact custody holders +without explicitly asking for the data. + +However, *network identities* are still used to build a backbone for each vertical subnet. +These nodes should have received the samples, and can serve a buffer of them on demand. 
+Although serving these is not directly incentivised, it is little work: +1. Buffer any message you see on the backbone vertical subnets, for a buffer of up to two weeks. +2. Serve the samples on request. An individual sample is just expected to be `~ 0.5 KB`, and does not require any pre-processing to serve. + +Pulling samples directly from nodes with a custody responsibility, without revealing their identity to the network, is an open problem. + + + ## How to avoid slashing Proposer and Attester slashings described in Phase 0 remain in place with the From e3a7e169f546415465ef0e90a9769af81429b9e4 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 1 Jan 2021 18:52:03 +0100 Subject: [PATCH 075/127] DAS doc --- specs/phase1/data-availability-sampling.md | 72 +++++++++++++++++++--- 1 file changed, 65 insertions(+), 7 deletions(-) diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/data-availability-sampling.md index 2ae5e697b..051106e94 100644 --- a/specs/phase1/data-availability-sampling.md +++ b/specs/phase1/data-availability-sampling.md @@ -32,8 +32,30 @@ class DASSample(Container): ## Helper functions +### Data extension + +Implementations: +- [Python](https://github.com/protolambda/partial_fft/blob/master/das_fft.py) +- [Go](https://github.com/protolambda/go-kate/blob/master/das_extension.go) + +```python +def das_fft_extension(data: Sequence[Point]) -> Sequence[Point]: + """Given some even-index values of an IFFT input, compute the odd-index inputs, such that the second output half is all zeroes.""" + poly = inverse_fft(data) + return fft(poly + [0]*len(poly))[1::2] +``` + +### Data recovery + +See [Reed-Solomon erasure code recovery in n*log^2(n) time with FFTs](https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039) for theory. 
+Implementations: +- [Original Python](https://github.com/ethereum/research/blob/master/mimc_stark/recovery.py) +- [New optimized approach in python](https://github.com/ethereum/research/tree/master/polynomial_reconstruction) +- [Old approach in Go](https://github.com/protolambda/go-kate/blob/master/recovery.go) + ```python def recover_data(data: Sequence[Optional[Point]]) -> Sequence[Point]: + """Given an a subset of half or more of the values (missing values are None), recover the None values.""" ... ``` @@ -41,25 +63,61 @@ def recover_data(data: Sequence[Optional[Point]]) -> Sequence[Point]: ```python def extend_data(data: Sequence[Point]) -> Sequence[Point]: - ... + # To force adjacent data into the same proofs, reverse-bit-order the whole list. + evens = [data[reverse_bit_order(i, len(data))] for i in range(len(data))] + # last step of reverse-bit-order: mix in the extended data. + # When undoing the reverse bit order: 1st half matches original data, and 2nd half matches the extension. + odds = das_fft_extension(data) + return [evens[i//2] if i % 2 == 0 else odds[i//2] for i in range(len(data)*2)] ``` ```python def unextend_data(extended_data: Sequence[Point]) -> Sequence[Point]: + return [extended_data[reverse_bit_order(i, len(extended_data))] for i in range(len(extended_data)//2)] +``` + +```python +def check_multi_kate_proof(commitment: BLSCommitment, proof: BLSKateProof, x: Point, ys: Sequence[Point]) -> bool: ... ``` ```python -def sample_data(extended_data: Sequence[Point]) -> Sequence[DASSample]: - ... +def construct_proofs(extended_data_as_poly: Sequence[Point]) -> Sequence[BLSKateProof]: + """Constructs proofs for samples of extended data (in polynomial form, 2nd half being zeroes)""" + ... # TODO Use FK20 multi-proof code to construct proofs for a chunk length of POINTS_PER_SAMPLE. ``` ```python -def verify_sample(sample: DASSample): - ... 
+def sample_data(slot: Slot, shard: Shard, extended_data: Sequence[Point]) -> Sequence[DASSample]: + # TODO: padding of last sample (if not a multiple of POINTS_PER_SAMPLE) + sample_count = len(extended_data) // POINTS_PER_SAMPLE + assert sample_count <= MAX_SAMPLES_PER_BLOCK + proofs = construct_proofs(ifft(extended_data)) + return [ + DASSample( + slot=slot, + shard=shard, + index=i, + proof=proofs[reverse_bit_order(i, sample_count)], # TODO: proof order depends on API of construct_proofs + data=reverse_bit_order_list(extended_data[i*POINTS_PER_SAMPLE:(i+1)*POINTS_PER_SAMPLE]) # TODO: can reorder here, or defer + ) for i in range(sample_count) + ] ``` ```python -def reconstruct_extended_data(samples: Sequence[DASSample]) -> Sequence[Point]: - ... +def verify_sample(sample: DASSample, sample_count: uint64, commitment: BLSCommitment): + domain_pos = reverse_bit_order(sample.index, sample_count) + sample_root_of_unity = ROOT_OF_UNITY**MAX_SAMPLES_PER_BLOCK # change point-level to sample-level domain + x = sample_root_of_unity**domain_pos + assert check_multi_kate_proof(commitment, sample.proof, x, sample.data) +``` + +```python +def reconstruct_extended_data(samples: Sequence[Optional[DASSample]]) -> Sequence[Point]: + extended_data = [None] * (len(samples) * POINTS_PER_SAMPLE) + for sample in samples: + offset = sample.index * POINTS_PER_SAMPLE + for i, p in enumerate(sample.data): + extended_data[offset+i] = p + return recover_data(extended_data) ``` From 91e935d4f35f9c373013365f9d97ef7ed6a91378 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 1 Jan 2021 21:52:29 +0100 Subject: [PATCH 076/127] more DAS spec work: DAS function signatures, gossip details --- specs/phase1/beacon-chain.md | 5 ++ specs/phase1/data-availability-sampling.md | 27 ++++++++++ specs/phase1/p2p-das.md | 59 +++++++++++++++++++++- specs/phase1/validator.md | 4 ++ 4 files changed, 93 insertions(+), 2 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 
6ef3cf1d7..8e81ae237 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -145,7 +145,10 @@ class AttestationData(Container): ```python class BeaconBlock(phase0.BeaconBlock): +<<<<<<< HEAD # insert phase 0 fields +======= +>>>>>>> 3c19069e (more DAS spec work: DAS function signatures, gossip details) shard_headers: List[SignedShardHeader, MAX_SHARD_HEADERS] ``` @@ -191,6 +194,8 @@ class ShardHeader(Container): degree_proof: BLSKateProof ``` +TODO: add shard-proposer-index to shard headers, similar to optimization done with beacon-blocks. + ### `SignedShardHeader` ```python diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/data-availability-sampling.md index 051106e94..5a62b202e 100644 --- a/specs/phase1/data-availability-sampling.md +++ b/specs/phase1/data-availability-sampling.md @@ -30,6 +30,33 @@ class DASSample(Container): data: Vector[BLSPoint, POINTS_PER_SAMPLE] ``` +### ShardBlob + +The blob of data, effectively a block. Network-only. + +```python +class ShardBlob(Container): + # Slot and shard that this blob is intended for + slot: Slot + shard: Shard + # The actual data + data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] +``` + +Note that the hash-tree-root of the `ShardBlob` does not match the `ShardHeader`, +since the blob deals with full data, whereas the header includes the Kate commitment instead. + +### SignedShardBlob + +Network-only. + +```python +class ShardBlob(Container): + message: ShardBlob + signature: BLSSignature +``` + + ## Helper functions ### Data extension diff --git a/specs/phase1/p2p-das.md b/specs/phase1/p2p-das.md index 6d31e121b..b72340498 100644 --- a/specs/phase1/p2p-das.md +++ b/specs/phase1/p2p-das.md @@ -85,6 +85,12 @@ sufficient to avoid any significant amount of nodes from being 100% predictable. As soon as a sample is missing after the expected propagation time window, nodes can divert to the pull-model, or ultimately flag it as unavailable data. 
+Note that the vertical subnets are shared between the different shards, +and a simple hash function `(shard, slot, sample_index) -> subnet_index` defines which samples go where. +This is to evenly distribute samples to subnets, even when one shard has more activity than the other. + +TODO: define `(shard, slot, sample_index) -> subnet_index` hash function. + #### Slow rotation: Backbone To allow for subscriptions to rotate quickly and randomly, a backbone is formed to help onboard peers into other topics. @@ -113,12 +119,61 @@ If the node does not already have connected peers on the topic it needs to sampl ### Topics and messages -#### Horizontal subnets +Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are: +| Name | Message Type | +|----------------------------------|---------------------------| +| `shard_blob_{shard}` | `SignedShardBlob` | +| `shard_header_{shard}` | `SignedShardHeader` | +| `das_sample_{subnet_index}` | `DASSample` | +TODO: separate phase 1 network spec. +#### Horizontal subnets: `shard_blob_{shard}` -#### Vertical subnets +Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{shard}` subnets. +If participating in DAS, upon receiving a `blob` for the first time, with a `slot` not older than `MAX_RESAMPLE_TIME`, +a subscriber of a `shard_blob_{shard}` SHOULD reconstruct the samples and publish them to vertical subnets. +1. Extend the data: `extended_data = extend_data(blob.data)` +2. Create samples with proofs: `samples = sample_data(blob.slot, blob.shard, extended_data)` +3. Fanout-publish the samples to the vertical subnets of its peers (not all vertical subnets may be reached). + +The [DAS validator spec](./validator.md#data-availability-sampling) outlines when and where to participate in DAS on horizontal subnets. 
+ +The following validations MUST pass before forwarding the `blob` on the horizontal subnet or creating samples for it. +- _[REJECT]_ `blob.message.shard` MUST match the topic `{shard}` parameter. (And thus within valid shard index range) +- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- + i.e. validate that `blob.message.slot <= current_slot` + (a client MAY queue future blobs for processing at the appropriate slot). +- _[IGNORE]_ The blob is the first blob with valid signature received for the proposer for the `(slot, shard)`: `blob.message.slot`. +- _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. +- _[REJECT]_ The `blob.message.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. +- _[REJECT]_ The proposer signature, `blob.signature`, is valid with respect to the `proposer_index` pubkey. +- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot + +TODO: define a blob header (note: hash-tree-root instead of commitment data) and make double blob proposals slashable? + +#### Vertical subnets: `das_sample_{subnet_index}` + +Shard blob samples can be verified with just a 48 byte Kate proof, against the commitment specific to that `(shard, slot)` key. + +The following validations MUST pass before forwarding the `sample` on the vertical subnet. +- _[IGNORE]_ The commitment for the (`sample.shard`, `sample.slot`, `sample.index`) tuple must be known. + If not known, the client MAY queue the sample, if it passes formatting conditions. +- _[REJECT]_ `sample.shard`, `sample.slot` and `sample.index` are hashed into a `subnet_index` (TODO: define hash) which MUST match the topic `{subnet_index}` parameter. +- _[REJECT]_ `sample.shard` must be within valid range: `0 <= sample.shard < get_active_shard_count(state, compute_epoch_at_slot(sample.slot))`. 
+- _[REJECT]_ `sample.index` must be within valid range: `0 <= sample.index < sample_count`, where: + - `sample_count = (points_count + POINTS_PER_SAMPLE - 1) // POINTS_PER_SAMPLE` + - `points_count` is the length as claimed along with the commitment, which must be smaller than `MAX_SAMPLES_PER_BLOCK`. +- _[IGNORE]_ The `sample` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- + i.e. validate that `sample.slot <= current_slot`. A client MAY queue future samples for processing at the appropriate slot, if it passed formatting conditions. +- _[IGNORE]_ This is the first received sample with the (`sample.shard`, `sample.slot`, `sample.index`) key tuple. +- _[REJECT]_ As already limited by the SSZ list-limit, it is important the sample data is well-formatted and not too large. +- _[REJECT]_ The `sample.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. +- _[REJECT]_ The `sample.proof` MUST be valid: `verify_sample(sample, sample_count, commitment)` + +Upon receiving a valid sample, it SHOULD be retained for a buffer period, if the local node is part of the backbone that covers this sample. +This is to serve other peers that may have missed it. ## DAS in the Req-Resp domain: Pull diff --git a/specs/phase1/validator.md b/specs/phase1/validator.md index dce794e1b..58ffcf6eb 100644 --- a/specs/phase1/validator.md +++ b/specs/phase1/validator.md @@ -575,6 +575,10 @@ Although serving these is not directly incentivised, it is little work: 1. Buffer any message you see on the backbone vertical subnets, for a buffer of up to two weeks. 2. Serve the samples on request. An individual sample is just expected to be `~ 0.5 KB`, and does not require any pre-processing to serve. +A validator SHOULD make a `DASQuery` request to random peers, until failing more than the configured failure-rate. + +TODO: detailed failure-mode spec. Stop after trying e.g. 
3 peers for any sample in a configured time window (after the gossip period). + Pulling samples directly from nodes with a custody responsibility, without revealing their identity to the network, is an open problem. From a02f856e54820baee4ed46affa9830596bf76a85 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 2 Jan 2021 14:25:31 +0100 Subject: [PATCH 077/127] update bit-reverse-ordering/presentation + shard blob signature verification --- specs/phase1/beacon-chain.md | 11 --- specs/phase1/data-availability-sampling.md | 103 +++++++++++++++------ specs/phase1/p2p-das.md | 17 ++-- 3 files changed, 86 insertions(+), 45 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 8e81ae237..0cff0a18f 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -232,17 +232,6 @@ def next_power_of_two(x): return 2 ** ((x - 1).bit_length()) ``` -#### `reverse_bit_order` - -```python -def reverse_bit_order(n, order): - """ - Reverse the bit order of an integer n - """ - assert is_power_of_two(order) - return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2) -``` - #### `compute_previous_slot` ```python diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/data-availability-sampling.md index 5a62b202e..226a2fdb1 100644 --- a/specs/phase1/data-availability-sampling.md +++ b/specs/phase1/data-availability-sampling.md @@ -17,6 +17,15 @@ We define the following Python custom types for type hinting and readability: | `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` | +## Configuration + +### Misc + +| Name | Value | Notes | +| - | - | - | +| `MAX_RESAMPLE_TIME` | `TODO` (= TODO) | Time window to sample a shard blob and put it on vertical subnets | + + ## New containers ### DASSample @@ -51,14 +60,37 @@ since the blob deals with full data, whereas the header includes the Kate commit Network-only. 
```python -class ShardBlob(Container): - message: ShardBlob +class SignedShardBlob(Container): + blob: ShardBlob + # The signature, the message is the commitment on the blob signature: BLSSignature ``` ## Helper functions +### Reverse bit ordering + +#### `reverse_bit_order` + +```python +def reverse_bit_order(n: int, order: int): + """ + Reverse the bit order of an integer n + """ + assert is_power_of_two(order) + return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2) +``` + +#### `reverse_bit_order_list` + +```python +def reverse_bit_order_list(elements: Sequence[int]) -> Sequence[int]: + order = len(elements) + assert is_power_of_two(order) + return [elements[reverse_bit_order(i, order)] for i in range(order)] +``` + ### Data extension Implementations: @@ -67,7 +99,10 @@ Implementations: ```python def das_fft_extension(data: Sequence[Point]) -> Sequence[Point]: - """Given some even-index values of an IFFT input, compute the odd-index inputs, such that the second output half is all zeroes.""" + """ + Given some even-index values of an IFFT input, compute the odd-index inputs, + such that the second output half of the IFFT is all zeroes. + """ poly = inverse_fft(data) return fft(poly + [0]*len(poly))[1::2] ``` @@ -81,8 +116,8 @@ Implementations: - [Old approach in Go](https://github.com/protolambda/go-kate/blob/master/recovery.go) ```python -def recover_data(data: Sequence[Optional[Point]]) -> Sequence[Point]: - """Given an a subset of half or more of the values (missing values are None), recover the None values.""" +def recover_data(data: Sequence[Optional[Sequence[Point]]]) -> Sequence[Point]: + """Given an a subset of half or more of subgroup-aligned ranges of values, recover the None values.""" ... 
``` @@ -90,43 +125,61 @@ def recover_data(data: Sequence[Optional[Point]]) -> Sequence[Point]: ```python def extend_data(data: Sequence[Point]) -> Sequence[Point]: - # To force adjacent data into the same proofs, reverse-bit-order the whole list. - evens = [data[reverse_bit_order(i, len(data))] for i in range(len(data))] - # last step of reverse-bit-order: mix in the extended data. - # When undoing the reverse bit order: 1st half matches original data, and 2nd half matches the extension. - odds = das_fft_extension(data) - return [evens[i//2] if i % 2 == 0 else odds[i//2] for i in range(len(data)*2)] + """ + The input data gets reverse-bit-ordered, such that the first half of the final output matches the original data. + We calculated the odd-index values with the DAS FFT extension, reverse-bit-order to put them in the second half. + """ + rev_bit_odds = reverse_bit_order_list(das_fft_extension(reverse_bit_order_list(data))) + return data + rev_bit_odds ``` ```python def unextend_data(extended_data: Sequence[Point]) -> Sequence[Point]: - return [extended_data[reverse_bit_order(i, len(extended_data))] for i in range(len(extended_data)//2)] + return extended_data[:len(extended_data)//2] ``` ```python def check_multi_kate_proof(commitment: BLSCommitment, proof: BLSKateProof, x: Point, ys: Sequence[Point]) -> bool: - ... + """ + Run a KZG multi-proof check to verify that for the subgroup starting at x, + the proof indeed complements the ys to match the commitment. + """ + ... # Omitted for now, refer to Kate implementation resources. ``` ```python def construct_proofs(extended_data_as_poly: Sequence[Point]) -> Sequence[BLSKateProof]: - """Constructs proofs for samples of extended data (in polynomial form, 2nd half being zeroes)""" - ... # TODO Use FK20 multi-proof code to construct proofs for a chunk length of POINTS_PER_SAMPLE. + """ + Constructs proofs for samples of extended data (in polynomial form, 2nd half being zeroes). 
+ Use the FK20 multi-proof approach to construct proofs for a chunk length of POINTS_PER_SAMPLE. + """ + ... # Omitted for now, refer to Kate implementation resources. +``` + +```python +def commit_to_data(data_as_poly: Sequence[Point]) -> Sequence[BLSCommitment]: + """Commit to a polynomial by """ ``` ```python def sample_data(slot: Slot, shard: Shard, extended_data: Sequence[Point]) -> Sequence[DASSample]: - # TODO: padding of last sample (if not a multiple of POINTS_PER_SAMPLE) sample_count = len(extended_data) // POINTS_PER_SAMPLE assert sample_count <= MAX_SAMPLES_PER_BLOCK - proofs = construct_proofs(ifft(extended_data)) + # get polynomial form of full extended data, second half will be all zeroes. + poly = ifft(reverse_bit_order_list(extended_data)) + assert all(v == 0 for v in poly[len(poly)//2:]) + proofs = construct_proofs(poly) return [ DASSample( slot=slot, shard=shard, + # The proof applies to `x = w ** (reverse_bit_order(i, sample_count) * POINTS_PER_SAMPLE)` index=i, - proof=proofs[reverse_bit_order(i, sample_count)], # TODO: proof order depends on API of construct_proofs - data=reverse_bit_order_list(extended_data[i*POINTS_PER_SAMPLE:(i+1)*POINTS_PER_SAMPLE]) # TODO: can reorder here, or defer + # The computed proofs match the reverse_bit_order_list(extended_data), undo that to get the right proof. + proof=proofs[reverse_bit_order(i, sample_count)], + # note: we leave the sample data as-is so it matches the original nicely. 
+ # The proof applies to `ys = reverse_bit_order_list(sample.data)` + data=extended_data[i*POINTS_PER_SAMPLE:(i+1)*POINTS_PER_SAMPLE] ) for i in range(sample_count) ] ``` @@ -136,15 +189,13 @@ def verify_sample(sample: DASSample, sample_count: uint64, commitment: BLSCommit domain_pos = reverse_bit_order(sample.index, sample_count) sample_root_of_unity = ROOT_OF_UNITY**MAX_SAMPLES_PER_BLOCK # change point-level to sample-level domain x = sample_root_of_unity**domain_pos - assert check_multi_kate_proof(commitment, sample.proof, x, sample.data) + ys = reverse_bit_order_list(sample.data) + assert check_multi_kate_proof(commitment, sample.proof, x, ys) ``` ```python def reconstruct_extended_data(samples: Sequence[Optional[DASSample]]) -> Sequence[Point]: - extended_data = [None] * (len(samples) * POINTS_PER_SAMPLE) - for sample in samples: - offset = sample.index * POINTS_PER_SAMPLE - for i, p in enumerate(sample.data): - extended_data[offset+i] = p - return recover_data(extended_data) + # Instead of recovering with a point-by-point approach, recover the samples by recovering missing subgroups. + subgroups = [None if sample is None else reverse_bit_order_list(sample.data) for sample in samples] + return recover_data(subgroups) ``` diff --git a/specs/phase1/p2p-das.md b/specs/phase1/p2p-das.md index b72340498..f2645b8ac 100644 --- a/specs/phase1/p2p-das.md +++ b/specs/phase1/p2p-das.md @@ -132,26 +132,27 @@ TODO: separate phase 1 network spec. Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{shard}` subnets. -If participating in DAS, upon receiving a `blob` for the first time, with a `slot` not older than `MAX_RESAMPLE_TIME`, +If participating in DAS, upon receiving a `signed_blob` for the first time, with a `slot` not older than `MAX_RESAMPLE_TIME`, a subscriber of a `shard_blob_{shard}` SHOULD reconstruct the samples and publish them to vertical subnets. +Take `blob = signed_blob.blob`: 1. 
Extend the data: `extended_data = extend_data(blob.data)` 2. Create samples with proofs: `samples = sample_data(blob.slot, blob.shard, extended_data)` 3. Fanout-publish the samples to the vertical subnets of its peers (not all vertical subnets may be reached). The [DAS validator spec](./validator.md#data-availability-sampling) outlines when and where to participate in DAS on horizontal subnets. -The following validations MUST pass before forwarding the `blob` on the horizontal subnet or creating samples for it. -- _[REJECT]_ `blob.message.shard` MUST match the topic `{shard}` parameter. (And thus within valid shard index range) +The following validations MUST pass before forwarding the `signed_blob` (with inner `blob`) on the horizontal subnet or creating samples for it. +- _[REJECT]_ `blob.shard` MUST match the topic `{shard}` parameter. (And thus within valid shard index range) - _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `blob.message.slot <= current_slot` + i.e. validate that `blob.slot <= current_slot` (a client MAY queue future blobs for processing at the appropriate slot). -- _[IGNORE]_ The blob is the first blob with valid signature received for the proposer for the `(slot, shard)`: `blob.message.slot`. +- _[IGNORE]_ The blob is the first blob with valid signature received for the proposer for the `(slot, shard)`: `blob.slot`. - _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. -- _[REJECT]_ The `blob.message.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. -- _[REJECT]_ The proposer signature, `blob.signature`, is valid with respect to the `proposer_index` pubkey. +- _[REJECT]_ The `blob.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. 
+- _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey, signed over the SSZ output of `commit_to_data(blob.data)`. - _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot -TODO: define a blob header (note: hash-tree-root instead of commitment data) and make double blob proposals slashable? +TODO: make double blob proposals slashable? #### Vertical subnets: `das_sample_{subnet_index}` From 334c88961cfa0a644ec481a479eddf2a889ffc03 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 2 Jan 2021 14:32:30 +0100 Subject: [PATCH 078/127] toc --- specs/phase1/beacon-chain.md | 1 - specs/phase1/data-availability-sampling.md | 17 +++++++++++++++ specs/phase1/p2p-das.md | 25 ++++++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 0cff0a18f..744823567 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -28,7 +28,6 @@ - [Helper functions](#helper-functions) - [Misc](#misc-1) - [`next_power_of_two`](#next_power_of_two) - - [`reverse_bit_order`](#reverse_bit_order) - [`compute_previous_slot`](#compute_previous_slot) - [`compute_updated_gasprice`](#compute_updated_gasprice) - [`compute_committee_source_epoch`](#compute_committee_source_epoch) diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/data-availability-sampling.md index 226a2fdb1..c331c6dde 100644 --- a/specs/phase1/data-availability-sampling.md +++ b/specs/phase1/data-availability-sampling.md @@ -5,6 +5,23 @@ ## Table of contents + + +- [Custom types](#custom-types) +- [Configuration](#configuration) + - [Misc](#misc) +- [New containers](#new-containers) + - [DASSample](#dassample) + - [ShardBlob](#shardblob) + - [SignedShardBlob](#signedshardblob) +- [Helper functions](#helper-functions) + - [Reverse bit ordering](#reverse-bit-ordering) + - [`reverse_bit_order`](#reverse_bit_order) + - 
[`reverse_bit_order_list`](#reverse_bit_order_list) + - [Data extension](#data-extension) + - [Data recovery](#data-recovery) +- [DAS functions](#das-functions) + ## Custom types diff --git a/specs/phase1/p2p-das.md b/specs/phase1/p2p-das.md index f2645b8ac..225053b89 100644 --- a/specs/phase1/p2p-das.md +++ b/specs/phase1/p2p-das.md @@ -1,3 +1,28 @@ + + +**Table of Contents** + +- [Ethereum 2.0 Phase 1 -- Network specification for Data Availability Sampling](#ethereum-20-phase-1----network-specification-for-data-availability-sampling) + - [Table of contents](#table-of-contents) + - [Introduction](#introduction) + - [DAS Subnets](#das-subnets) + - [Horizontal subnets](#horizontal-subnets) + - [Publishing](#publishing) + - [Horizontal propagation](#horizontal-propagation) + - [Horizontal to vertical](#horizontal-to-vertical) + - [Vertical subnets](#vertical-subnets) + - [Slow rotation: Backbone](#slow-rotation-backbone) + - [Quick Rotation: Sampling](#quick-rotation-sampling) + - [DAS in the Gossip domain: Push](#das-in-the-gossip-domain-push) + - [Topics and messages](#topics-and-messages) + - [Horizontal subnets: `shard_blob_{shard}`](#horizontal-subnets-shard_blob_shard) + - [Vertical subnets: `das_sample_{subnet_index}`](#vertical-subnets-das_sample_subnet_index) + - [DAS in the Req-Resp domain: Pull](#das-in-the-req-resp-domain-pull) + - [Messages](#messages) + - [DASQuery](#dasquery) + + + # Ethereum 2.0 Phase 1 -- Network specification for Data Availability Sampling **Notice**: This document is a work-in-progress for researchers and implementers. 
From 5e57ff0ab18af37b72c9920f1d9e1103e0ce155b Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Sat, 2 Jan 2021 22:06:57 +0100 Subject: [PATCH 079/127] Update specs/phase1/data-availability-sampling.md Co-authored-by: dankrad --- specs/phase1/data-availability-sampling.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/data-availability-sampling.md index c331c6dde..d343fb0d6 100644 --- a/specs/phase1/data-availability-sampling.md +++ b/specs/phase1/data-availability-sampling.md @@ -174,7 +174,7 @@ def construct_proofs(extended_data_as_poly: Sequence[Point]) -> Sequence[BLSKate ``` ```python -def commit_to_data(data_as_poly: Sequence[Point]) -> Sequence[BLSCommitment]: +def commit_to_data(data_as_poly: Sequence[Point]) -> BLSCommitment: """Commit to a polynomial by """ ``` From a1831940ea1525eadfa347e147284a14a422d72e Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 2 Jan 2021 22:31:25 +0100 Subject: [PATCH 080/127] adjustments based on review by @dankrad --- specs/phase1/beacon-chain.md | 3 +-- specs/phase1/data-availability-sampling.md | 14 +++++++------- specs/phase1/p2p-das.md | 3 ++- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index 744823567..aa4c38a2f 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -67,7 +67,6 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `Shard` | `uint64` | A shard number | | `BLSCommitment` | `bytes48` | A G1 curve point | -| `BLSKateProof` | `bytes48` | A G1 curve point | ## Configuration @@ -190,7 +189,7 @@ class ShardHeader(Container): # The actual data commitment commitment: DataCommitment # Proof that the degree < commitment.length - degree_proof: BLSKateProof + degree_proof: BLSCommitment ``` TODO: add shard-proposer-index to shard headers, similar to optimization done with beacon-blocks. 
diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/data-availability-sampling.md index d343fb0d6..119d619a5 100644 --- a/specs/phase1/data-availability-sampling.md +++ b/specs/phase1/data-availability-sampling.md @@ -52,7 +52,7 @@ class DASSample(Container): slot: Slot shard: Shard index: SampleIndex - proof: BLSKateProof + proof: BLSCommitment data: Vector[BLSPoint, POINTS_PER_SAMPLE] ``` @@ -70,7 +70,7 @@ class ShardBlob(Container): ``` Note that the hash-tree-root of the `ShardBlob` does not match the `ShardHeader`, -since the blob deals with full data, whereas the header includes the Kate commitment instead. +since the blob deals with full data, whereas the header includes the KZG commitment instead. ### SignedShardBlob @@ -156,21 +156,21 @@ def unextend_data(extended_data: Sequence[Point]) -> Sequence[Point]: ``` ```python -def check_multi_kate_proof(commitment: BLSCommitment, proof: BLSKateProof, x: Point, ys: Sequence[Point]) -> bool: +def check_multi_kzg_proof(commitment: BLSCommitment, proof: BLSCommitment, x: Point, ys: Sequence[Point]) -> bool: """ Run a KZG multi-proof check to verify that for the subgroup starting at x, the proof indeed complements the ys to match the commitment. """ - ... # Omitted for now, refer to Kate implementation resources. + ... # Omitted for now, refer to KZG implementation resources. ``` ```python -def construct_proofs(extended_data_as_poly: Sequence[Point]) -> Sequence[BLSKateProof]: +def construct_proofs(extended_data_as_poly: Sequence[Point]) -> Sequence[BLSCommitment]: """ Constructs proofs for samples of extended data (in polynomial form, 2nd half being zeroes). Use the FK20 multi-proof approach to construct proofs for a chunk length of POINTS_PER_SAMPLE. """ - ... # Omitted for now, refer to Kate implementation resources. + ... # Omitted for now, refer to KZG implementation resources. 
``` ```python @@ -207,7 +207,7 @@ def verify_sample(sample: DASSample, sample_count: uint64, commitment: BLSCommit sample_root_of_unity = ROOT_OF_UNITY**MAX_SAMPLES_PER_BLOCK # change point-level to sample-level domain x = sample_root_of_unity**domain_pos ys = reverse_bit_order_list(sample.data) - assert check_multi_kate_proof(commitment, sample.proof, x, ys) + assert check_multi_kzg_proof(commitment, sample.proof, x, ys) ``` ```python diff --git a/specs/phase1/p2p-das.md b/specs/phase1/p2p-das.md index 225053b89..b26264982 100644 --- a/specs/phase1/p2p-das.md +++ b/specs/phase1/p2p-das.md @@ -181,7 +181,8 @@ TODO: make double blob proposals slashable? #### Vertical subnets: `das_sample_{subnet_index}` -Shard blob samples can be verified with just a 48 byte Kate proof, against the commitment specific to that `(shard, slot)` key. +Shard blob samples can be verified with just a 48 byte KZG proof (commitment quotient polynomial), +against the commitment to blob polynomial, specific to that `(shard, slot)` key. The following validations MUST pass before forwarding the `sample` on the vertical subnet. - _[IGNORE]_ The commitment for the (`sample.shard`, `sample.slot`, `sample.index`) tuple must be known. 
From a8c9cfbe8438a3a0370608008e2999f05aa11564 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 4 Jan 2021 11:17:30 -0700 Subject: [PATCH 081/127] minor cleanups/fixes to shard data avail PR --- specs/phase1/beacon-chain.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/specs/phase1/beacon-chain.md b/specs/phase1/beacon-chain.md index aa4c38a2f..782db226f 100644 --- a/specs/phase1/beacon-chain.md +++ b/specs/phase1/beacon-chain.md @@ -143,10 +143,7 @@ class AttestationData(Container): ```python class BeaconBlock(phase0.BeaconBlock): -<<<<<<< HEAD # insert phase 0 fields -======= ->>>>>>> 3c19069e (more DAS spec work: DAS function signatures, gossip details) shard_headers: List[SignedShardHeader, MAX_SHARD_HEADERS] ``` From c6af2ec68728731d32cc6fdc0fd9d46f8a084819 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Mon, 4 Jan 2021 21:55:11 +0100 Subject: [PATCH 082/127] Code review - apply suggestions to DAS doc Co-authored-by: Danny Ryan --- specs/phase1/p2p-das.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/phase1/p2p-das.md b/specs/phase1/p2p-das.md index b26264982..6057391e9 100644 --- a/specs/phase1/p2p-das.md +++ b/specs/phase1/p2p-das.md @@ -46,13 +46,13 @@ This is a lot of work, and ideally happens at a low latency. To achieve quick querying, the query model is changed to *push* the samples to listeners instead, using GossipSub. The listeners then randomly rotate their subscriptions to keep queries unpredictable. -Except for a small subset of subscriptions, which will function as a backbone to keep topics more stable. +Except for a small subset of subscriptions, which will function as a backbone to keep topics more stable and allow for efficient peer discovery. Publishing can utilize the fan-out functionality in GossipSub, and is easier to split between nodes: nodes on the horizontal networks can help by producing the same samples and fan-out publishing to their own peers. 
This push model also helps to obfuscate the original source of a message: -the listeners will not have to make individual queries to some identified source. +the listeners do not have to make individual queries to some identified source. The push model does not aim to serve "historical" queries (anything older than the most recent). Historical queries are still required for the unhappy case, where messages are not pushed quick enough, @@ -80,7 +80,7 @@ a regular proposer cannot reach every vertical subnet. #### Publishing -To publish their work, proposers already put the shard block as a whole on a shard-block subnet. +To publish their work, proposers propagate the shard block as a whole on a shard-block subnet. The proposer can fan-out their work more aggressively, by using the fan-out functionality of GossipSub: it may publish to all its peers on the subnet, instead of just those in its mesh. @@ -138,7 +138,7 @@ Backbone subscription work is outlined in the [DAS validator spec](./validator.m #### Quick Rotation: Sampling A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS validator spec](./validator.md#data-availability-sampling). -If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore for peers in the topic backbone. +If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore, and if necessary in the DHT, for peers in the topic backbone. ## DAS in the Gossip domain: Push @@ -175,7 +175,7 @@ The following validations MUST pass before forwarding the `signed_blob` (with in - _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. - _[REJECT]_ The `blob.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. 
- _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey, signed over the SSZ output of `commit_to_data(blob.data)`. -- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot +- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot. TODO: make double blob proposals slashable? From 4c5afb92f41c0e67458bd225526baeecb0eea132 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 4 Jan 2021 22:22:17 +0100 Subject: [PATCH 083/127] refactor/polish style of DAS docs, move DAS validator work to new doc --- ...ilability-sampling.md => das-internals.md} | 2 +- specs/phase1/{p2p-das.md => das-p2p.md} | 24 +++--- specs/phase1/das-participation.md | 74 +++++++++++++++++++ specs/phase1/validator.md | 47 ------------ 4 files changed, 85 insertions(+), 62 deletions(-) rename specs/phase1/{data-availability-sampling.md => das-internals.md} (99%) rename specs/phase1/{p2p-das.md => das-p2p.md} (95%) create mode 100644 specs/phase1/das-participation.md diff --git a/specs/phase1/data-availability-sampling.md b/specs/phase1/das-internals.md similarity index 99% rename from specs/phase1/data-availability-sampling.md rename to specs/phase1/das-internals.md index 119d619a5..fad656e29 100644 --- a/specs/phase1/data-availability-sampling.md +++ b/specs/phase1/das-internals.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Data Availability Sampling +# Ethereum 2.0 Phase 1 -- Data Availability Sampling - Internals **Notice**: This document is a work-in-progress for researchers and implementers. 
diff --git a/specs/phase1/p2p-das.md b/specs/phase1/das-p2p.md similarity index 95% rename from specs/phase1/p2p-das.md rename to specs/phase1/das-p2p.md index 6057391e9..63206ed78 100644 --- a/specs/phase1/p2p-das.md +++ b/specs/phase1/das-p2p.md @@ -1,6 +1,11 @@ +# Ethereum 2.0 Phase 1 -- Data Availability Sampling - Network specification + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + -**Table of Contents** - [Ethereum 2.0 Phase 1 -- Network specification for Data Availability Sampling](#ethereum-20-phase-1----network-specification-for-data-availability-sampling) - [Table of contents](#table-of-contents) @@ -23,19 +28,10 @@ -# Ethereum 2.0 Phase 1 -- Network specification for Data Availability Sampling - -**Notice**: This document is a work-in-progress for researchers and implementers. - -## Table of contents - - - - ## Introduction -For an introduction about DAS itself, see [the DAS section in the Phase 1 validator spec](./validator.md#data-availability-sampling). +For an introduction about DAS itself, see [the DAS participation spec](./das-participation.md#data-availability-sampling). This is not a pre-requisite for the network layer, but will give you valuable context. For sampling, all nodes need to query for `k` random samples each slot. @@ -133,11 +129,11 @@ These subscriptions rotate slowly, and with different offsets per node identity # TODO hash function: (node, time)->subnets ``` -Backbone subscription work is outlined in the [DAS validator spec](./validator.md#data-availability-sampling) +Backbone subscription work is outlined in the [DAS participation spec](./das-participation.md#slow-rotation-backbone) #### Quick Rotation: Sampling -A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS validator spec](./validator.md#data-availability-sampling). 
+A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS participation spec](./das-participation.md#quick-rotation-sampling). If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore, and if necessary in the DHT, for peers in the topic backbone. ## DAS in the Gossip domain: Push @@ -164,7 +160,7 @@ Take `blob = signed_blob.blob`: 2. Create samples with proofs: `samples = sample_data(blob.slot, blob.shard, extended_data)` 3. Fanout-publish the samples to the vertical subnets of its peers (not all vertical subnets may be reached). -The [DAS validator spec](./validator.md#data-availability-sampling) outlines when and where to participate in DAS on horizontal subnets. +The [DAS participation spec](./das-participation.md#horizontal-subnets) outlines when and where to participate in DAS on horizontal subnets. The following validations MUST pass before forwarding the `signed_blob` (with inner `blob`) on the horizontal subnet or creating samples for it. - _[REJECT]_ `blob.shard` MUST match the topic `{shard}` parameter. (And thus within valid shard index range) diff --git a/specs/phase1/das-participation.md b/specs/phase1/das-participation.md new file mode 100644 index 000000000..12e53049b --- /dev/null +++ b/specs/phase1/das-participation.md @@ -0,0 +1,74 @@ +# Ethereum 2.0 Phase 1 -- Data Availability Sampling - Participation + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + + +TODO + + + + +## Data Availability Sampling + +TODO: Summary of Data Availability problem + +TODO: Summary of solution, why 2x extension, and randomized samples + +## GossipSub + +### Horizontal subnets + +TODO + +### Vertical subnets + +#### Slow rotation: Backbone + +TODO + +#### Quick rotation: Sampling + +TODO + + +### DAS during network instability + +The GossipSub based retrieval of samples may not always work. 
+In such event, a node can move through below stages until it recovers data availability. + +#### Stage 0: Waiting on missing samples + +Wait for the sample to re-broadcast. Someone may be slow with publishing, or someone else is able to do the work. + +Any node can do the following work to keep the network healthy: +- Common: Listen on a horizontal subnet, chunkify the block data in samples, and propagate the samples to vertical subnets. +- Extreme: Listen on enough vertical subnets, reconstruct the missing samples by recovery, and propagate the recovered samples. + +This is not a requirement, but should improve the network stability with little resources, and without any central party. + +#### Stage 1: Pulling missing samples from known peers + +The more realistic option, to execute when a sample is missing, is to query any node that is known to hold it. +Since *consensus identity is disconnected from network identity*, there is no direct way to contact custody holders +without explicitly asking for the data. + +However, *network identities* are still used to build a backbone for each vertical subnet. +These nodes should have received the samples, and can serve a buffer of them on demand. +Although serving these is not directly incentivised, it is little work: +1. Buffer any message you see on the backbone vertical subnets, for a buffer of up to two weeks. +2. Serve the samples on request. An individual sample is just expected to be `~ 0.5 KB`, and does not require any pre-processing to serve. + +A validator SHOULD make a `DASQuery` request to random peers, until failing more than the configured failure-rate. + +TODO: detailed failure-mode spec. Stop after trying e.g. 3 peers for any sample in a configured time window (after the gossip period). + +#### Stage 2: Pulling missing data from validators with custody. 
+ +Pulling samples directly from nodes with validators that have a custody responsibility, +without revealing their identity to the network, is an open problem. + diff --git a/specs/phase1/validator.md b/specs/phase1/validator.md index 58ffcf6eb..ccc877be0 100644 --- a/specs/phase1/validator.md +++ b/specs/phase1/validator.md @@ -536,53 +536,6 @@ class SignedLightAggregateAndProof(Container): signature: BLSSignature ``` -## Data Availability Sampling - -### Gossip subscriptions to maintain - -#### Slow rotation: Backbone - -TODO - -#### Quick rotation: Sampling - -TODO - - -### DAS during network instability - -The GossipSub based retrieval of samples may not always work - -#### Waiting on missing samples - -Wait for the sample to re-broadcast. Someone may be slow with publishing, or someone else is able to do the work. - -Any node can do the following work to keep the network healthy: -- Common: Listen on a horizontal subnet, chunkify the block data in samples, and propagate the samples to vertical subnets. -- Extreme: Listen on enough vertical subnets, reconstruct the missing samples by recovery, and propagate the recovered samples. - -This is not a requirement, but should improve the network stability with little resources, and without any central party. - -#### Pulling missing samples - -The more realistic option, to execute when a sample is missing, is to query any node that is known to hold it. -Since *consensus identity is disconnected from network identity*, there is no direct way to contact custody holders -without explicitly asking for the data. - -However, *network identities* are still used to build a backbone for each vertical subnet. -These nodes should have received the samples, and can serve a buffer of them on demand. -Although serving these is not directly incentivised, it is little work: -1. Buffer any message you see on the backbone vertical subnets, for a buffer of up to two weeks. -2. Serve the samples on request. 
An individual sample is just expected to be `~ 0.5 KB`, and does not require any pre-processing to serve. - -A validator SHOULD make a `DASQuery` request to random peers, until failing more than the configured failure-rate. - -TODO: detailed failure-mode spec. Stop after trying e.g. 3 peers for any sample in a configured time window (after the gossip period). - -Pulling samples directly from nodes with a custody responsibility, without revealing their identity to the network, is an open problem. - - - ## How to avoid slashing Proposer and Attester slashings described in Phase 0 remain in place with the From 8e21a31d0fdf781467c5c29fdbaff244aa5a6cda Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 4 Jan 2021 22:24:34 +0100 Subject: [PATCH 084/127] DAS docs TOC updates --- specs/phase1/das-p2p.md | 35 +++++++++++++++---------------- specs/phase1/das-participation.md | 12 ++++++++++- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/specs/phase1/das-p2p.md b/specs/phase1/das-p2p.md index 63206ed78..016d1a591 100644 --- a/specs/phase1/das-p2p.md +++ b/specs/phase1/das-p2p.md @@ -7,24 +7,23 @@ -- [Ethereum 2.0 Phase 1 -- Network specification for Data Availability Sampling](#ethereum-20-phase-1----network-specification-for-data-availability-sampling) - - [Table of contents](#table-of-contents) - - [Introduction](#introduction) - - [DAS Subnets](#das-subnets) - - [Horizontal subnets](#horizontal-subnets) - - [Publishing](#publishing) - - [Horizontal propagation](#horizontal-propagation) - - [Horizontal to vertical](#horizontal-to-vertical) - - [Vertical subnets](#vertical-subnets) - - [Slow rotation: Backbone](#slow-rotation-backbone) - - [Quick Rotation: Sampling](#quick-rotation-sampling) - - [DAS in the Gossip domain: Push](#das-in-the-gossip-domain-push) - - [Topics and messages](#topics-and-messages) - - [Horizontal subnets: `shard_blob_{shard}`](#horizontal-subnets-shard_blob_shard) - - [Vertical subnets: 
`das_sample_{subnet_index}`](#vertical-subnets-das_sample_subnet_index) - - [DAS in the Req-Resp domain: Pull](#das-in-the-req-resp-domain-pull) - - [Messages](#messages) - - [DASQuery](#dasquery) + +- [Introduction](#introduction) +- [DAS Subnets](#das-subnets) + - [Horizontal subnets](#horizontal-subnets) + - [Publishing](#publishing) + - [Horizontal propagation](#horizontal-propagation) + - [Horizontal to vertical](#horizontal-to-vertical) + - [Vertical subnets](#vertical-subnets) + - [Slow rotation: Backbone](#slow-rotation-backbone) + - [Quick Rotation: Sampling](#quick-rotation-sampling) +- [DAS in the Gossip domain: Push](#das-in-the-gossip-domain-push) + - [Topics and messages](#topics-and-messages) + - [Horizontal subnets: `shard_blob_{shard}`](#horizontal-subnets-shard_blob_shard) + - [Vertical subnets: `das_sample_{subnet_index}`](#vertical-subnets-das_sample_subnet_index) +- [DAS in the Req-Resp domain: Pull](#das-in-the-req-resp-domain-pull) + - [Messages](#messages) + - [DASQuery](#dasquery) diff --git a/specs/phase1/das-participation.md b/specs/phase1/das-participation.md index 12e53049b..5300ba0d3 100644 --- a/specs/phase1/das-participation.md +++ b/specs/phase1/das-participation.md @@ -8,7 +8,17 @@ -TODO + +- [Data Availability Sampling](#data-availability-sampling) +- [GossipSub](#gossipsub) + - [Horizontal subnets](#horizontal-subnets) + - [Vertical subnets](#vertical-subnets) + - [Slow rotation: Backbone](#slow-rotation-backbone) + - [Quick rotation: Sampling](#quick-rotation-sampling) + - [DAS during network instability](#das-during-network-instability) + - [Stage 0: Waiting on missing samples](#stage-0-waiting-on-missing-samples) + - [Stage 1: Pulling missing samples from known peers](#stage-1-pulling-missing-samples-from-known-peers) + - [Stage 2: Pulling missing data from validators with custody.](#stage-2-pulling-missing-data-from-validators-with-custody) From 02e3144283b437506d8765b211f80aca667d49cd Mon Sep 17 00:00:00 2001 From: 
protolambda Date: Mon, 4 Jan 2021 22:26:17 +0100 Subject: [PATCH 085/127] fix DAS p2p validation rule wording --- specs/phase1/das-p2p.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase1/das-p2p.md b/specs/phase1/das-p2p.md index 016d1a591..15a7dc66f 100644 --- a/specs/phase1/das-p2p.md +++ b/specs/phase1/das-p2p.md @@ -166,7 +166,7 @@ The following validations MUST pass before forwarding the `signed_blob` (with in - _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `blob.slot <= current_slot` (a client MAY queue future blobs for processing at the appropriate slot). -- _[IGNORE]_ The blob is the first blob with valid signature received for the proposer for the `(slot, shard)`: `blob.slot`. +- _[IGNORE]_ The blob is the first blob with valid signature received for the proposer for the `(slot, shard)` combination. - _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. - _[REJECT]_ The `blob.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. - _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey, signed over the SSZ output of `commit_to_data(blob.data)`. 
From b7d965b5431cd5e9b8548ea7a92297ec91e18637 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 4 Jan 2021 22:47:42 +0100 Subject: [PATCH 086/127] split out general phase1 networking from DAS --- specs/phase1/das-p2p.md | 18 ++--------- specs/phase1/p2p-phase1.md | 63 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 16 deletions(-) create mode 100644 specs/phase1/p2p-phase1.md diff --git a/specs/phase1/das-p2p.md b/specs/phase1/das-p2p.md index 15a7dc66f..f227872ba 100644 --- a/specs/phase1/das-p2p.md +++ b/specs/phase1/das-p2p.md @@ -142,15 +142,13 @@ If the node does not already have connected peers on the topic it needs to sampl Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are: | Name | Message Type | |----------------------------------|---------------------------| -| `shard_blob_{shard}` | `SignedShardBlob` | -| `shard_header_{shard}` | `SignedShardHeader` | | `das_sample_{subnet_index}` | `DASSample` | -TODO: separate phase 1 network spec. +Also see the [Phase1 general networking spec](./p2p-phase1.md) for important topics such as that of the shard-blobs and shard-headers. #### Horizontal subnets: `shard_blob_{shard}` -Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{shard}` subnets. +Extending the regular `shard_blob_{shard}` as [defined in the Phase1 networking specification](./p2p-phase1.md#shard-blobs-shard_blob_shard) If participating in DAS, upon receiving a `signed_blob` for the first time, with a `slot` not older than `MAX_RESAMPLE_TIME`, a subscriber of a `shard_blob_{shard}` SHOULD reconstruct the samples and publish them to vertical subnets. @@ -161,18 +159,6 @@ Take `blob = signed_blob.blob`: The [DAS participation spec](./das-participation.md#horizontal-subnets) outlines when and where to participate in DAS on horizontal subnets. 
-The following validations MUST pass before forwarding the `signed_blob` (with inner `blob`) on the horizontal subnet or creating samples for it. -- _[REJECT]_ `blob.shard` MUST match the topic `{shard}` parameter. (And thus within valid shard index range) -- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. validate that `blob.slot <= current_slot` - (a client MAY queue future blobs for processing at the appropriate slot). -- _[IGNORE]_ The blob is the first blob with valid signature received for the proposer for the `(slot, shard)` combination. -- _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. -- _[REJECT]_ The `blob.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. -- _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey, signed over the SSZ output of `commit_to_data(blob.data)`. -- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot. - -TODO: make double blob proposals slashable? #### Vertical subnets: `das_sample_{subnet_index}` diff --git a/specs/phase1/p2p-phase1.md b/specs/phase1/p2p-phase1.md new file mode 100644 index 000000000..633814a35 --- /dev/null +++ b/specs/phase1/p2p-phase1.md @@ -0,0 +1,63 @@ +# Ethereum 2.0 Phase 1 -- Network specification + +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of contents + + + + + +- [Introduction](#introduction) +- [DAS in the Gossip domain: Push](#das-in-the-gossip-domain-push) + - [Topics and messages](#topics-and-messages) + - [Shard blobs: `shard_blob_{shard}`](#shard-blobs-shard_blob_shard) + - [Shard header: `shard_header`](#shard-header-shard_header) + + + +## Introduction + +With Phase 1, shard data is introduced, which requires various new additions and adjustments to the groundwork that Phase 0 implements. +The specification of these changes continues in the same format, and assumes Phase0 as pre-requisite. +The Phase 0 adjustments and additions for Shards are outlined in this document. +See the [Data Availability Sampling network specification](./das-p2p.md) for Phase 1 networking specifc to Data availability. + + +## DAS in the Gossip domain: Push + +### Topics and messages + +Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are: +| Name | Message Type | +|----------------------------------|---------------------------| +| `shard_blob_{shard}` | `SignedShardBlob` | +| `shard_header` | `SignedShardHeader` | + +The [DAS network specification](./das-p2p.md) defines additional topics. + +#### Shard blobs: `shard_blob_{shard}` + +Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{shard}` subnets. + +The [DAS networking specification](./das-p2p.md#horizontal-subnets) outlines an extension of the regular behavior on this topic. + +The following validations MUST pass before forwarding the `signed_blob` (with inner `blob`) on the horizontal subnet or creating samples for it. +- _[REJECT]_ `blob.shard` MUST match the topic `{shard}` parameter. (And thus within valid shard index range) +- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- + i.e. 
validate that `blob.slot <= current_slot` + (a client MAY queue future blobs for processing at the appropriate slot). +- _[IGNORE]_ The blob is the first blob with valid signature received for the proposer for the `(slot, shard)` combination. +- _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. +- _[REJECT]_ The `blob.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. +- _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey, signed over the SSZ output of `commit_to_data(blob.data)`. +- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot. + +TODO: make double blob proposals slashable? + +#### Shard header: `shard_header` + +Shard header data, in the form of a `SignedShardHeader` is published to the global `shard_header` subnet. + +TODO: validation conditions. + From cf8676690a2b48d426d17d3488806daa27dbceac Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 4 Jan 2021 22:58:55 +0100 Subject: [PATCH 087/127] notes about backbone identification function claims --- specs/phase1/das-p2p.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/phase1/das-p2p.md b/specs/phase1/das-p2p.md index f227872ba..dfb9d1542 100644 --- a/specs/phase1/das-p2p.md +++ b/specs/phase1/das-p2p.md @@ -117,9 +117,11 @@ To allow for subscriptions to rotate quickly and randomly, a backbone is formed This backbone is based on a pure function of the *node* identity and time: - Nodes can be found *without additional discovery overhead*: - peers on a vertical topic can be found by searching the local peerstore for identities that hash to the desired topic(s). + peers on a vertical topic can be found by searching the local peerstore for identities that hash to the desired topic(s), + assuming the peerstore already has a large enough variety of peers. 
- Nodes can be held accountable for contributing to the backbone: peers that participate in DAS but are not active on the appropriate backbone topics can be scored down. + *Note: This is experimental, DAS should be light enough for all participants to run, but scoring needs to undergo testing* A node should anticipate backbone topics to subscribe to based on their own identity. These subscriptions rotate slowly, and with different offsets per node identity to avoid sudden network-wide rotations. From 8116e1c0a367d40724a6aa69b48dd71ed1da6c85 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 4 Jan 2021 23:03:06 +0100 Subject: [PATCH 088/127] minor fixes to appease CI --- specs/phase1/das-p2p.md | 1 - specs/phase1/das-participation.md | 1 - specs/phase1/p2p-phase1.md | 3 +-- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/specs/phase1/das-p2p.md b/specs/phase1/das-p2p.md index dfb9d1542..e1579576d 100644 --- a/specs/phase1/das-p2p.md +++ b/specs/phase1/das-p2p.md @@ -7,7 +7,6 @@ - - [Introduction](#introduction) - [DAS Subnets](#das-subnets) - [Horizontal subnets](#horizontal-subnets) diff --git a/specs/phase1/das-participation.md b/specs/phase1/das-participation.md index 5300ba0d3..63ceb240a 100644 --- a/specs/phase1/das-participation.md +++ b/specs/phase1/das-participation.md @@ -8,7 +8,6 @@ - - [Data Availability Sampling](#data-availability-sampling) - [GossipSub](#gossipsub) - [Horizontal subnets](#horizontal-subnets) diff --git a/specs/phase1/p2p-phase1.md b/specs/phase1/p2p-phase1.md index 633814a35..c17bea36f 100644 --- a/specs/phase1/p2p-phase1.md +++ b/specs/phase1/p2p-phase1.md @@ -7,7 +7,6 @@ - - [Introduction](#introduction) - [DAS in the Gossip domain: Push](#das-in-the-gossip-domain-push) - [Topics and messages](#topics-and-messages) @@ -21,7 +20,7 @@ With Phase 1, shard data is introduced, which requires various new additions and adjustments to the groundwork that Phase 0 implements. 
The specification of these changes continues in the same format, and assumes Phase0 as pre-requisite. The Phase 0 adjustments and additions for Shards are outlined in this document. -See the [Data Availability Sampling network specification](./das-p2p.md) for Phase 1 networking specifc to Data availability. +See the [Data Availability Sampling network specification](./das-p2p.md) for Phase 1 networking specific to Data availability. ## DAS in the Gossip domain: Push From b3c5e65698801700eec068b9a20330c900aae663 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Mar 2021 23:31:12 +0100 Subject: [PATCH 089/127] cleanup old docs about transition-full sharding --- specs/phase1/shard-fork-choice.md | 178 ------------------------------ specs/phase1/shard-transition.md | 145 ------------------------ 2 files changed, 323 deletions(-) delete mode 100644 specs/phase1/shard-fork-choice.md delete mode 100644 specs/phase1/shard-transition.md diff --git a/specs/phase1/shard-fork-choice.md b/specs/phase1/shard-fork-choice.md deleted file mode 100644 index 177c9c18c..000000000 --- a/specs/phase1/shard-fork-choice.md +++ /dev/null @@ -1,178 +0,0 @@ -# Ethereum 2.0 Phase 1 -- Beacon Chain + Shard Chain Fork Choice - -**Notice**: This document is a work-in-progress for researchers and implementers. - -## Table of contents - - - - -- [Introduction](#introduction) -- [Fork choice](#fork-choice) - - [Helpers](#helpers) - - [`get_forkchoice_shard_store`](#get_forkchoice_shard_store) - - [`get_shard_latest_attesting_balance`](#get_shard_latest_attesting_balance) - - [`get_shard_head`](#get_shard_head) - - [`get_shard_ancestor`](#get_shard_ancestor) - - [`get_pending_shard_blocks`](#get_pending_shard_blocks) - - [Handlers](#handlers) - - [`on_shard_block`](#on_shard_block) - - - -## Introduction - -This document is the shard chain fork choice spec for part of Ethereum 2.0 Phase 1. It assumes the [beacon chain fork choice spec](./fork-choice.md). 
- -## Fork choice - -### Helpers - -#### `get_forkchoice_shard_store` - -```python -def get_forkchoice_shard_store(anchor_state: BeaconState, shard: Shard) -> ShardStore: - return ShardStore( - shard=shard, - signed_blocks={ - anchor_state.shard_states[shard].latest_block_root: SignedShardBlock( - message=ShardBlock(slot=compute_previous_slot(anchor_state.slot), shard=shard) - ) - }, - block_states={anchor_state.shard_states[shard].latest_block_root: anchor_state.copy().shard_states[shard]}, - ) -``` - -#### `get_shard_latest_attesting_balance` - -```python -def get_shard_latest_attesting_balance(store: Store, shard: Shard, root: Root) -> Gwei: - shard_store = store.shard_stores[shard] - state = store.checkpoint_states[store.justified_checkpoint] - active_indices = get_active_validator_indices(state, get_current_epoch(state)) - return Gwei(sum( - state.validators[i].effective_balance for i in active_indices - if ( - i in shard_store.latest_messages - # TODO: check the latest message logic: currently, validator's previous vote of another shard - # would be ignored once their newer vote is accepted. Check if it makes sense. - and get_shard_ancestor( - store, - shard, - shard_store.latest_messages[i].root, - shard_store.signed_blocks[root].message.slot, - ) == root - ) - )) -``` - -#### `get_shard_head` - -```python -def get_shard_head(store: Store, shard: Shard) -> Root: - # Execute the LMD-GHOST fork choice - """ - Execute the LMD-GHOST fork choice. 
- """ - shard_store = store.shard_stores[shard] - beacon_head_root = get_head(store) - shard_head_state = store.block_states[beacon_head_root].shard_states[shard] - shard_head_root = shard_head_state.latest_block_root - shard_blocks = { - root: signed_shard_block.message for root, signed_shard_block in shard_store.signed_blocks.items() - if signed_shard_block.message.slot > shard_head_state.slot - } - while True: - # Find the valid child block roots - children = [ - root for root, shard_block in shard_blocks.items() - if shard_block.shard_parent_root == shard_head_root - ] - if len(children) == 0: - return shard_head_root - # Sort by latest attesting balance with ties broken lexicographically - shard_head_root = max( - children, key=lambda root: (get_shard_latest_attesting_balance(store, shard, root), root) - ) -``` - -#### `get_shard_ancestor` - -```python -def get_shard_ancestor(store: Store, shard: Shard, root: Root, slot: Slot) -> Root: - shard_store = store.shard_stores[shard] - block = shard_store.signed_blocks[root].message - if block.slot > slot: - return get_shard_ancestor(store, shard, block.shard_parent_root, slot) - elif block.slot == slot: - return root - else: - # root is older than queried slot, thus a skip slot. Return most recent root prior to slot - return root -``` - -#### `get_pending_shard_blocks` - -```python -def get_pending_shard_blocks(store: Store, shard: Shard) -> Sequence[SignedShardBlock]: - """ - Return the canonical shard block branch that has not yet been crosslinked. 
- """ - shard_store = store.shard_stores[shard] - - beacon_head_root = get_head(store) - beacon_head_state = store.block_states[beacon_head_root] - latest_shard_block_root = beacon_head_state.shard_states[shard].latest_block_root - - shard_head_root = get_shard_head(store, shard) - root = shard_head_root - signed_shard_blocks = [] - while root != latest_shard_block_root: - signed_shard_block = shard_store.signed_blocks[root] - signed_shard_blocks.append(signed_shard_block) - root = signed_shard_block.message.shard_parent_root - - signed_shard_blocks.reverse() - return signed_shard_blocks -``` - -### Handlers - -#### `on_shard_block` - -```python -def on_shard_block(store: Store, signed_shard_block: SignedShardBlock) -> None: - shard_block = signed_shard_block.message - shard = shard_block.shard - shard_store = store.shard_stores[shard] - - # Check shard parent exists - assert shard_block.shard_parent_root in shard_store.block_states - shard_parent_state = shard_store.block_states[shard_block.shard_parent_root] - - # Check beacon parent exists - assert shard_block.beacon_parent_root in store.block_states - beacon_parent_state = store.block_states[shard_block.beacon_parent_root] - - # Check that block is later than the finalized shard state slot (optimization to reduce calls to get_ancestor) - finalized_beacon_state = store.block_states[store.finalized_checkpoint.root] - finalized_shard_state = finalized_beacon_state.shard_states[shard] - assert shard_block.slot > finalized_shard_state.slot - - # Check block is a descendant of the finalized block at the checkpoint finalized slot - finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - assert ( - get_ancestor(store, shard_block.beacon_parent_root, finalized_slot) == store.finalized_checkpoint.root - ) - - # Check the block is valid and compute the post-state - shard_state = shard_parent_state.copy() - shard_state_transition(shard_state, signed_shard_block, beacon_parent_state, 
validate_result=True) - - # Add new block to the store - # Note: storing `SignedShardBlock` format for computing `ShardTransition.proposer_signature_aggregate` - shard_store.signed_blocks[hash_tree_root(shard_block)] = signed_shard_block - - # Add new state for this block to the store - shard_store.block_states[hash_tree_root(shard_block)] = shard_state -``` diff --git a/specs/phase1/shard-transition.md b/specs/phase1/shard-transition.md deleted file mode 100644 index 35d421cdd..000000000 --- a/specs/phase1/shard-transition.md +++ /dev/null @@ -1,145 +0,0 @@ -# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs - -**Notice**: This document is a work-in-progress for researchers and implementers. - -## Table of contents - - - - -- [Introduction](#introduction) -- [Helper functions](#helper-functions) - - [Shard block verification functions](#shard-block-verification-functions) - - [`verify_shard_block_message`](#verify_shard_block_message) - - [`verify_shard_block_signature`](#verify_shard_block_signature) -- [Shard state transition function](#shard-state-transition-function) -- [Fraud proofs](#fraud-proofs) - - [Verifying the proof](#verifying-the-proof) - - - -## Introduction - -This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0. 
- -## Helper functions - -### Shard block verification functions - -#### `verify_shard_block_message` - -```python -def verify_shard_block_message(beacon_parent_state: BeaconState, - shard_parent_state: ShardState, - block: ShardBlock) -> bool: - # Check `shard_parent_root` field - assert block.shard_parent_root == shard_parent_state.latest_block_root - # Check `beacon_parent_root` field - beacon_parent_block_header = beacon_parent_state.latest_block_header.copy() - if beacon_parent_block_header.state_root == Root(): - beacon_parent_block_header.state_root = hash_tree_root(beacon_parent_state) - beacon_parent_root = hash_tree_root(beacon_parent_block_header) - assert block.beacon_parent_root == beacon_parent_root - # Check `slot` field - shard = block.shard - next_slot = Slot(block.slot + 1) - offset_slots = compute_offset_slots(get_latest_slot_for_shard(beacon_parent_state, shard), next_slot) - assert block.slot in offset_slots - # Check `proposer_index` field - assert block.proposer_index == get_shard_proposer_index(beacon_parent_state, block.slot, shard) - # Check `body` field - assert 0 < len(block.body) <= MAX_SHARD_BLOCK_SIZE - return True -``` - -#### `verify_shard_block_signature` - -```python -def verify_shard_block_signature(beacon_parent_state: BeaconState, - signed_block: SignedShardBlock) -> bool: - proposer = beacon_parent_state.validators[signed_block.message.proposer_index] - domain = get_domain(beacon_parent_state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(signed_block.message.slot)) - signing_root = compute_signing_root(signed_block.message, domain) - return bls.Verify(proposer.pubkey, signing_root, signed_block.signature) -``` - -## Shard state transition function - -The post-state corresponding to a pre-state `shard_state` and a signed block `signed_block` is defined as `shard_state_transition(shard_state, signed_block, beacon_parent_state)`, where `beacon_parent_state` is the parent beacon state of the `signed_block`. 
State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid. - -```python -def shard_state_transition(shard_state: ShardState, - signed_block: SignedShardBlock, - beacon_parent_state: BeaconState, - validate_result: bool = True) -> None: - assert verify_shard_block_message(beacon_parent_state, shard_state, signed_block.message) - - if validate_result: - assert verify_shard_block_signature(beacon_parent_state, signed_block) - - process_shard_block(shard_state, signed_block.message) -``` - -```python -def process_shard_block(shard_state: ShardState, - block: ShardBlock) -> None: - """ - Update ``shard_state`` with shard ``block``. - """ - shard_state.slot = block.slot - prev_gasprice = shard_state.gasprice - shard_block_length = len(block.body) - shard_state.gasprice = compute_updated_gasprice(prev_gasprice, uint64(shard_block_length)) - if shard_block_length != 0: - shard_state.latest_block_root = hash_tree_root(block) -``` - -## Fraud proofs - -### Verifying the proof - -TODO. The intent is to have a single universal fraud proof type, which contains the following parts: - -1. An on-time attestation `attestation` on some shard `shard` signing a `transition: ShardTransition` -2. An index `offset_index` of a particular position to focus on -3. The `transition: ShardTransition` itself -4. The full body of the shard block `shard_block` -5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing -6. The `subkey` to generate the custody bit - -Call the following function to verify the proof: - -```python -def is_valid_fraud_proof(beacon_state: BeaconState, - attestation: Attestation, - offset_index: uint64, - transition: ShardTransition, - block: ShardBlock, - subkey: BLSPubkey, - beacon_parent_block: BeaconBlock) -> bool: - # 1. 
Check if `custody_bits[offset_index][j] != generate_custody_bit(subkey, block_contents)` for any `j`. - custody_bits = attestation.custody_bits_blocks - for j in range(len(custody_bits[offset_index])): - if custody_bits[offset_index][j] != generate_custody_bit(subkey, block): - return True - - # 2. Check if the shard state transition result is wrong between - # `transition.shard_states[offset_index - 1]` to `transition.shard_states[offset_index]`. - if offset_index == 0: - shard_states = beacon_parent_block.body.shard_transitions[attestation.data.shard].shard_states - shard_state = shard_states[len(shard_states) - 1] - else: - shard_state = transition.shard_states[offset_index - 1] # Not doing the actual state updates here. - - process_shard_block(shard_state, block) - if shard_state != transition.shard_states[offset_index]: - return True - - return False -``` - -```python -def generate_custody_bit(subkey: BLSPubkey, block: ShardBlock) -> bool: - # TODO - ... -``` From 2696968f24bd50f414021b965e112482ca327176 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Mar 2021 23:39:39 +0100 Subject: [PATCH 090/127] cleanup validator doc; lightclient was moved to altair, shards are data-heavy, no transitions for now --- specs/phase1/validator.md | 459 +------------------------------------- 1 file changed, 8 insertions(+), 451 deletions(-) diff --git a/specs/phase1/validator.md b/specs/phase1/validator.md index ccc877be0..c6d29e725 100644 --- a/specs/phase1/validator.md +++ b/specs/phase1/validator.md @@ -1,6 +1,8 @@ # Ethereum 2.0 Phase 1 -- Honest Validator -**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 1](./), which describes the expected actions of a "validator" participating in the Ethereum 2.0 Phase 1 protocol. +**Notice**: This document is a work-in-progress for researchers and implementers. 
+This is an accompanying document to [Ethereum 2.0 Phase 1](./), which describes the expected actions of a "validator" +participating in the Ethereum 2.0 Phase 1 protocol. ## Table of contents @@ -59,26 +61,12 @@ ## Introduction -This document represents the expected behavior of an "honest validator" with respect to Phase 1 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope. - -A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof-of-work networks in which miners provide collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol. - ## Prerequisites This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use them as a reference throughout. - -## Constants - -See constants from [Phase 0 validator guide](../phase0/validator.md#constants). 
- -### Misc - -| Name | Value | Unit | Duration | -| - | - | :-: | :-: | -| `TARGET_LIGHT_CLIENT_AGGREGATORS_PER_SLOT` | `2**3` (= 8) | validators | | -| `LIGHT_CLIENT_PREPARATION_EPOCHS` | `2**2` (= 4) | epochs | | +All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) +docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use them as a reference throughout. ## Becoming a validator @@ -88,38 +76,6 @@ Becoming a validator in Phase 1 is unchanged from Phase 0. See the [Phase 0 vali Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#validator-assignments) for details. -### Lookahead - -Lookahead for beacon committee assignments operates in the same manner as Phase 0, but committee members must join a shard block pubsub topic in addition to the committee attestation topic. - -Specifically _after_ finding stable peers of attestation subnets (see Phase 0) a validator should: -* Let `shard = compute_shard_from_committee_index(state, committee_index, slot)` -* Subscribe to the pubsub topic `shard_{shard}_block` (attestation subnet peers should have this topic available). - -TODO: For now, the `state` we pass to `compute_shard_from_committee_index` is the current state without considering `len(state.shard_states)`, i.e., the result from `get_active_shard_count(state)` changes. We should fix it when we have shard count update logic. - -## Beacon chain responsibilities - -A validator has two primary responsibilities to the beacon chain: [proposing blocks](#block-proposal) and [creating attestations](#attesting). Proposals happen infrequently, whereas attestations should be created once per epoch. 
- -These responsibilities are largely unchanged from Phase 0, but utilize the updated `SignedBeaconBlock`, `BeaconBlock`, `BeaconBlockBody`, `Attestation`, and `AttestationData` definitions found in Phase 1. Below notes only the additional and modified behavior with respect to Phase 0. - -Phase 1 adds light client committees and associated responsibilities, discussed [below](#light-client-committee). - -### Block proposal - -#### Preparing for a `BeaconBlock` - -`slot`, `proposer_index`, `parent_root`, `state_root` fields are unchanged. - -#### Constructing the `BeaconBlockBody` - -`randao_reveal`, `eth1_data`, and `graffiti` are unchanged. - -`proposer_slashings`, `deposits`, and `voluntary_exits` are unchanged. - -`attester_slashings` and `attestations` operate exactly as in Phase 0, but with new definitations of `AttesterSlashing` and `Attestation`, along with modified validation conditions found in `process_attester_slashing` and `process_attestation`. - ##### Custody slashings Up to `MAX_CUSTODY_SLASHINGS`, [`CustodySlashing`](./custody-game.md#custodyslashing) objects can be included in the `block`. The custody slashings must satisfy the verification conditions found in [custody slashings processing](./custody-game.md#custody-slashings). The validator receives a small "whistleblower" reward for each custody slashing included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE). @@ -132,414 +88,14 @@ Up to `MAX_CUSTODY_KEY_REVEALS`, [`CustodyKeyReveal`](./custody-game.md#custodyk ##### Early derived secret reveals Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./custody-game.md#earlyderivedsecretreveal) objects can be included in the `block`. The early derived secret reveals must satisfy the verification conditions found in [early derived secret reveal processing](./custody-game.md#custody-key-reveals). The validator receives a small "whistleblower" reward for each early derived secret reveal included. 
-##### Shard transitions - -Exactly `MAX_SHARDS` [`ShardTransition`](./beacon-chain.md#shardtransition) objects are included in the block. Default each to an empty `ShardTransition()`. Then for each committee assigned to the slot with an associated `committee_index` and `shard`, set `shard_transitions[shard] = full_transitions[winning_root]` if the committee had enough weight to form a crosslink this slot. - -Specifically: -* Call `shards, winning_roots = get_shard_winning_roots(state, block.body.attestations)` -* Let `full_transitions` be a dictionary mapping from the `shard_transition_root`s found in `attestations` to the corresponding full `ShardTransition` -* Then for each `shard` and `winning_root` in `zip(shards, winning_roots)` set `shard_transitions[shard] = full_transitions[winning_root]` - -*Note*: The `state` passed into `get_shard_winning_roots` must be transitioned the slot of `block.slot` to run accurately due to the internal use of `get_online_validator_indices` and `is_on_time_attestation`. 
- -```python -def get_shard_winning_roots(state: BeaconState, - attestations: Sequence[Attestation]) -> Tuple[Sequence[Shard], Sequence[Root]]: - shards = [] - winning_roots = [] - online_indices = get_online_validator_indices(state) - on_time_attestation_slot = compute_previous_slot(state.slot) - committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(on_time_attestation_slot)) - for committee_index in map(CommitteeIndex, range(committee_count)): - shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot) - # All attestations in the block for this committee/shard and are "on time" - shard_attestations = [ - attestation for attestation in attestations - if is_on_time_attestation(state, attestation.data) and attestation.data.index == committee_index - ] - committee = get_beacon_committee(state, on_time_attestation_slot, committee_index) - - # Loop over all shard transition roots, looking for a winning root - shard_transition_roots = set(a.data.shard_transition_root for a in shard_attestations) # non-duplicate - for shard_transition_root in sorted(shard_transition_roots): - transition_attestations = [ - a for a in shard_attestations - if a.data.shard_transition_root == shard_transition_root - ] - transition_participants: Set[ValidatorIndex] = set() - for attestation in transition_attestations: - participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits) - transition_participants = transition_participants.union(participants) - - enough_online_stake = ( - get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >= - get_total_balance(state, online_indices.intersection(committee)) * 2 - ) - if enough_online_stake: - shards.append(shard) - winning_roots.append(shard_transition_root) - break - - return shards, winning_roots -``` - -##### Light client fields - -First retrieve `best_aggregate` from `get_best_light_client_aggregate(block, aggregates)` where 
`aggregates` is a list of valid aggregated `LightClientVote`s for the previous slot. - -Then: -* Set `light_client_bits = best_aggregate.aggregation_bits` -* Set `light_client_signature = best_aggregate.signature` - -```python -def get_best_light_client_aggregate(block: BeaconBlock, - aggregates: Sequence[LightClientVote]) -> LightClientVote: - viable_aggregates = [ - aggregate for aggregate in aggregates - if ( - aggregate.data.slot == compute_previous_slot(block.slot) - and aggregate.data.beacon_block_root == block.parent_root - ) - ] - - return max( - viable_aggregates, - # Ties broken by lexicographically by hash_tree_root - key=lambda a: (len([i for i in a.aggregation_bits if i == 1]), hash_tree_root(a)), - default=LightClientVote(), - ) -``` - -#### Packaging into a `SignedBeaconBlock` - -Packaging into a `SignedBeaconBlock` is unchanged from Phase 0. - -### Attesting - -A validator is expected to create, sign, and broadcast an attestation during each epoch. - -Assignments and the core of this duty are unchanged from Phase 0. There are a few additional fields related to the assigned shard chain. - -The `Attestation` and `AttestationData` defined in the [Phase 1 Beacon Chain spec](./beacon-chain.md) utilizes `shard_transition_root: Root` rather than a full `ShardTransition`. For the purposes of the validator and p2p layer, a modified `FullAttestationData` and containing `FullAttestation` are used to send the accompanying `ShardTransition` in its entirety. Note that due to the properties of SSZ `hash_tree_root`, the root and signatures of `AttestationData` and `FullAttestationData` are equivalent. 
- -#### `FullAttestationData` - -```python -class FullAttestationData(Container): - slot: Slot - index: CommitteeIndex - # LMD GHOST vote - beacon_block_root: Root - # FFG vote - source: Checkpoint - target: Checkpoint - # Current-slot shard block root - shard_head_root: Root - # Full shard transition - shard_transition: ShardTransition -``` - -#### `FullAttestation` - -```python -class FullAttestation(Container): - aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] - data: FullAttestationData - signature: BLSSignature -``` - -#### Timing - -Note the timing of when to create/broadcast is altered from Phase 1. - -A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid `BeaconBlock` from the expected beacon block proposer and a valid `ShardBlock` for the expected shard block proposer for the assigned `slot` or (b) one-half of the `slot` has transpired (`SECONDS_PER_SLOT / 2` seconds after the start of `slot`) -- whichever comes _first_. - -#### Attestation data - -`attestation_data` is constructed in the same manner as Phase 0 but uses `FullAttestationData` with the addition of two fields -- `shard_head_root` and `shard_transition`. - -- Let `head_block` be the result of running the fork choice during the assigned slot. -- Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`. -- Let `shard_head_block` be the result of running the fork choice on the assigned shard chain during the assigned slot. -- Let `shard_blocks` be the shard blocks in the chain starting immediately _after_ the most recent crosslink (`head_state.shard_transitions[shard].latest_block_root`) up to the `shard_head_block` (i.e. the value of the shard fork choice store of `get_pending_shard_blocks(store, shard_store)`). 
- -*Note*: We assume that the fork choice only follows branches with valid `offset_slots` with respect to the most recent beacon state shard transition for the queried shard. - -##### Shard head root - -If `attestation_data.slot == GENESIS_SLOT`, set `attestation_data.shard_head_root = Root()`. Otherwise, set `attestation_data.shard_head_root = hash_tree_root(shard_head_block)`. - -##### Shard transition - -Set `shard_transition` to the value returned by `get_shard_transition(head_state, shard, shard_blocks)`. - -```python -def get_shard_transition_fields( - beacon_state: BeaconState, - shard: Shard, - shard_blocks: Sequence[SignedShardBlock], -) -> Tuple[Sequence[uint64], Sequence[Root], Sequence[ShardState]]: - shard_block_lengths = [] # type: PyList[uint64] - shard_data_roots = [] # type: PyList[Root] - shard_states = [] # type: PyList[ShardState] - - shard_state = beacon_state.shard_states[shard] - shard_block_slots = [shard_block.message.slot for shard_block in shard_blocks] - offset_slots = compute_offset_slots( - get_latest_slot_for_shard(beacon_state, shard), - Slot(beacon_state.slot + 1), - ) - for slot in offset_slots: - if slot in shard_block_slots: - shard_block = shard_blocks[shard_block_slots.index(slot)] - shard_data_roots.append(hash_tree_root(shard_block.message.body)) - else: - shard_block = SignedShardBlock(message=ShardBlock(slot=slot, shard=shard)) - shard_data_roots.append(Root()) - shard_state = shard_state.copy() - process_shard_block(shard_state, shard_block.message) - shard_states.append(shard_state) - shard_block_lengths.append(uint64(len(shard_block.message.body))) - - return shard_block_lengths, shard_data_roots, shard_states -``` - -```python -def get_shard_transition(beacon_state: BeaconState, - shard: Shard, - shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition: - # NOTE: We currently set `PHASE_1_FORK_SLOT` to `GENESIS_SLOT` for test vectors. 
- if beacon_state.slot == GENESIS_SLOT: - return ShardTransition() - - offset_slots = compute_offset_slots( - get_latest_slot_for_shard(beacon_state, shard), - Slot(beacon_state.slot + 1), - ) - shard_block_lengths, shard_data_roots, shard_states = ( - get_shard_transition_fields(beacon_state, shard, shard_blocks) - ) - - if len(shard_blocks) > 0: - proposer_signatures = [shard_block.signature for shard_block in shard_blocks] - proposer_signature_aggregate = bls.Aggregate(proposer_signatures) - else: - proposer_signature_aggregate = NO_SIGNATURE - - return ShardTransition( - start_slot=offset_slots[0], - shard_block_lengths=shard_block_lengths, - shard_data_roots=shard_data_roots, - shard_states=shard_states, - proposer_signature_aggregate=proposer_signature_aggregate, - ) -``` - #### Construct attestation -Next, the validator creates `attestation`, a `FullAttestation` as defined above. - `attestation.data`, `attestation.aggregation_bits`, and `attestation.signature` are unchanged from Phase 0. But safety/validity in signing the message is premised upon calculation of the "custody bit" [TODO]. -### Attestation Aggregation - -Some validators are selected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`. - -Aggregation selection and the core of this duty are largely unchanged from Phase 0. Any additional components or changes are noted. - -#### Broadcast aggregate - -Note the timing of when to broadcast aggregates is altered in Phase 1+. - -If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot` -- that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`. - -##### `AggregateAndProof` - -`AggregateAndProof` is unchanged other than the contained `Attestation`. 
- -```python -class AggregateAndProof(Container): - aggregator_index: ValidatorIndex - aggregate: Attestation - selection_proof: BLSSignature -``` - -##### `SignedAggregateAndProof` - -`AggregateAndProof` is unchanged other than the contained `AggregateAndProof`. - -```python -class SignedAggregateAndProof(Container): - message: AggregateAndProof - signature: BLSSignature -``` - -### Light client committee - -In addition to the core beacon chain responsibilities, Phase 1 adds an additional role -- the Light Client Committee -- to aid in light client functionality. - -Validators serve on the light client committee for `LIGHT_CLIENT_COMMITTEE_PERIOD` epochs and the assignment to be on a committee is known `LIGHT_CLIENT_COMMITTEE_PERIOD` epochs in advance. - -#### Preparation - -When `get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == LIGHT_CLIENT_COMMITTEE_PERIOD - LIGHT_CLIENT_PREPARATION_EPOCHS` each validator must check if they are in the next period light client committee by calling `is_in_next_light_client_committee()`. - -If the validator is in the next light client committee, they must join the `light_client_votes` pubsub topic to begin duties at the start of the next period. - -```python -def is_in_next_light_client_committee(state: BeaconState, index: ValidatorIndex) -> bool: - next_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD) - return index in next_committee -``` - -#### Light client vote - -During a period of epochs that the validator is a part of the light client committee (`validator_index in get_light_client_committee(state, epoch)`), the validator creates and broadcasts a `LightClientVote` at each slot. 
- -A validator should create and broadcast the `light_client_vote` to the `light_client_votes` pubsub topic when either (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the `slot` have transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_. - -- Let `light_client_committee = get_light_client_committee(state, compute_epoch_at_slot(slot))` - -##### Light client vote data - -First the validator constructs `light_client_vote_data`, a [`LightClientVoteData`](#lightclientvotedata) object. - -* Let `head_block` be the result of running the fork choice during the assigned slot. -* Set `light_client_vote.slot = slot`. -* Set `light_client_vote.beacon_block_root = hash_tree_root(head_block)`. - -###### `LightClientVoteData` - -```python -class LightClientVoteData(Container): - slot: Slot - beacon_block_root: Root -``` - -##### Construct vote - -Then the validator constructs `light_client_vote`, a [`LightClientVote`](#lightclientvote) object. - -* Set `light_client_vote.data = light_client_vote_data`. -* Set `light_client_vote.aggregation_bits` to be a `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]`, where the bit of the index of the validator in the `light_client_committee` is set to `0b1` and all other bits are are set to `0b0`. 
-* Set `light_client_vote.signature = vote_signature` where `vote_signature` is obtained from: - -```python -def get_light_client_vote_signature(state: BeaconState, - light_client_vote_data: LightClientVoteData, - privkey: int) -> BLSSignature: - domain = get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(light_client_vote_data.slot)) - signing_root = compute_signing_root(light_client_vote_data, domain) - return bls.Sign(privkey, signing_root) -``` - -###### `LightClientVote` - -```python -class LightClientVote(Container): - data: LightClientVoteData - aggregation_bits: Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE] - signature: BLSSignature -``` - -##### Broadcast - -Finally, the validator broadcasts `light_client_vote` to the `light_client_votes` pubsub topic. - -#### Light client vote aggregation - -Some validators in the light client committee are selected to locally aggregate light client votes with a similar `light_client_vote_data` to their constructed `light_client_vote` for the assigned `slot`. - -#### Aggregation selection - -A validator is selected to aggregate based upon the return value of `is_light_client_aggregator()`. - -```python -def get_light_client_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature: - domain = get_domain(state, DOMAIN_LIGHT_SELECTION_PROOF, compute_epoch_at_slot(slot)) - signing_root = compute_signing_root(slot, domain) - return bls.Sign(privkey, signing_root) -``` - -```python -def is_light_client_aggregator(state: BeaconState, slot: Slot, slot_signature: BLSSignature) -> bool: - committee = get_light_client_committee(state, compute_epoch_at_slot(slot)) - modulo = max(1, len(committee) // TARGET_LIGHT_CLIENT_AGGREGATORS_PER_SLOT) - return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0 -``` - -#### Construct aggregate - -If the validator is selected to aggregate (`is_light_client_aggregator()`), they construct an aggregate light client vote via the following. 
- -Collect `light_client_votes` seen via gossip during the `slot` that have an equivalent `light_client_vote_data` to that constructed by the validator, and create a `aggregate_light_client_vote: LightClientVote` with the following fields. - -* Set `aggregate_light_client_vote.data = light_client_vote_data` where `light_client_vote_data` is the `LightClientVoteData` object that is the same for each individual light client vote being aggregated. -* Set `aggregate_light_client_vote.aggregation_bits` to be a `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]`, where each bit set from each individual light client vote is set to `0b1`. -* Set `aggregate_light_client_vote.signature = aggregate_light_client_signature` where `aggregate_light_client_signature` is obtained from `get_aggregate_light_client_signature`. - -```python -def get_aggregate_light_client_signature(light_client_votes: Sequence[LightClientVote]) -> BLSSignature: - signatures = [light_client_vote.signature for light_client_vote in light_client_votes] - return bls.Aggregate(signatures) -``` - -#### Broadcast aggregate - -If the validator is selected to aggregate (`is_light_client_aggregator`), then they broadcast their best aggregate light client vote as a `SignedLightAggregateAndProof` to the global aggregate light client vote channel (`aggregate_light_client_votes`) two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`. - -Selection proofs are provided in `LightAggregateAndProof` to prove to the gossip channel that the validator has been selected as an aggregator. - -`LightAggregateAndProof` messages are signed by the aggregator and broadcast inside of `SignedLightAggregateAndProof` objects to prevent a class of DoS attacks and message forgeries. - -First, `light_aggregate_and_proof = get_light_aggregate_and_proof(state, validator_index, aggregate_light_client_vote, privkey)` is constructed. 
- -```python -def get_light_aggregate_and_proof(state: BeaconState, - aggregator_index: ValidatorIndex, - aggregate: LightClientVote, - privkey: int) -> LightAggregateAndProof: - return LightAggregateAndProof( - aggregator_index=aggregator_index, - aggregate=aggregate, - selection_proof=get_light_client_slot_signature(state, aggregate.data.slot, privkey), - ) -``` - -Then `signed_light_aggregate_and_proof = SignedLightAggregateAndProof(message=light_aggregate_and_proof, signature=signature)` is constructed and broadast. Where `signature` is obtained from: - -```python -def get_light_aggregate_and_proof_signature(state: BeaconState, - aggregate_and_proof: LightAggregateAndProof, - privkey: int) -> BLSSignature: - aggregate = aggregate_and_proof.aggregate - domain = get_domain(state, DOMAIN_LIGHT_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot)) - signing_root = compute_signing_root(aggregate_and_proof, domain) - return bls.Sign(privkey, signing_root) -``` - -##### `LightAggregateAndProof` - -```python -class LightAggregateAndProof(Container): - aggregator_index: ValidatorIndex - aggregate: LightClientVote - selection_proof: BLSSignature -``` - -##### `SignedLightAggregateAndProof` - -```python -class SignedLightAggregateAndProof(Container): - message: LightAggregateAndProof - signature: BLSSignature -``` ## How to avoid slashing -Proposer and Attester slashings described in Phase 0 remain in place with the - addition of the following. +Proposer and Attester slashings described in Phase 0 remain in place with the addition of the following. ### Custody slashing @@ -559,4 +115,5 @@ def get_custody_secret(state: BeaconState, return bls.Sign(privkey, signing_root) ``` -Note that the valid custody secret is always the one for the **attestation target epoch**, not to be confused with the epoch in which the shard block was generated. While they are the same most of the time, getting this wrong at custody epoch boundaries would result in a custody slashing. 
+Note that the valid custody secret is always the one for the **attestation target epoch**, not to be confused with the epoch in which the shard block was generated. +While they are the same most of the time, getting this wrong at custody epoch boundaries would result in a custody slashing. From 3f97cca5317fbc0bea79d5d37f355cafc0a9eab6 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Mar 2021 23:43:29 +0100 Subject: [PATCH 091/127] remove light-client-sync. Altair implements this. Shard-specific part can be implemented later. --- specs/phase1/light-client-sync.md | 174 ------------------------------ 1 file changed, 174 deletions(-) delete mode 100644 specs/phase1/light-client-sync.md diff --git a/specs/phase1/light-client-sync.md b/specs/phase1/light-client-sync.md deleted file mode 100644 index 107baa0c6..000000000 --- a/specs/phase1/light-client-sync.md +++ /dev/null @@ -1,174 +0,0 @@ -# Minimal Light Client Design - -**Notice**: This document is a work-in-progress for researchers and implementers. - -## Table of contents - - - - - -- [Introduction](#introduction) -- [Custom types](#custom-types) -- [Constants](#constants) -- [Containers](#containers) - - [`LightClientUpdate`](#lightclientupdate) -- [Helpers](#helpers) - - [`LightClientMemory`](#lightclientmemory) - - [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances) -- [Light client state updates](#light-client-state-updates) -- [Data overhead](#data-overhead) - - - - -## Introduction - -Ethereum 2.0 is designed to be light client friendly. This allows low-resource clients such as mobile phones to access Ethereum 2.0 with reasonable safety and liveness. It also facilitates the development of "bridges" to external blockchains. This document suggests a minimal light client design for the beacon chain. 
- -## Custom types - -We define the following Python custom types for type hinting and readability: - -| Name | SSZ equivalent | Description | -| - | - | - | -| `CompactValidator` | `uint64` | compact representation of a validator for light clients | - -## Constants - -| Name | Value | -| - | - | -| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH` | `4` | -| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX` | **TBD** | -| `PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH` | `5` | -| `PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX` | **TBD** | - -## Containers - -### `LightClientUpdate` - -```python -class LightClientUpdate(Container): - # Shard block root (and authenticating signature data) - shard_block_root: Root - fork_version: Version - aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] - signature: BLSSignature - # Updated beacon header (and authenticating branch) - header: BeaconBlockHeader - header_branch: Vector[Bytes32, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH] - # Updated period committee (and authenticating branch) - committee: CompactCommittee - committee_branch: Vector[Bytes32, PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)] -``` - -## Helpers - -### `LightClientMemory` - -```python -@dataclass -class LightClientMemory(object): - shard: Shard # Randomly initialized and retained forever - header: BeaconBlockHeader # Beacon header which is not expected to revert - # period committees corresponding to the beacon header - previous_committee: CompactCommittee - current_committee: CompactCommittee - next_committee: CompactCommittee -``` - -### `get_persistent_committee_pubkeys_and_balances` - -```python -def get_persistent_committee_pubkeys_and_balances(memory: LightClientMemory, - epoch: Epoch) -> Tuple[Sequence[BLSPubkey], Sequence[uint64]]: - """ - Return pubkeys and balances for the persistent committee at ``epoch``. 
- """ - current_period = compute_epoch_at_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD - next_period = epoch // EPOCHS_PER_SHARD_PERIOD - assert next_period in (current_period, current_period + 1) - if next_period == current_period: - earlier_committee, later_committee = memory.previous_committee, memory.current_committee - else: - earlier_committee, later_committee = memory.current_committee, memory.next_committee - - pubkeys = [] - balances = [] - for pubkey, compact_validator in zip(earlier_committee.pubkeys, earlier_committee.compact_validators): - index, slashed, balance = unpack_compact_validator(compact_validator) - if epoch % EPOCHS_PER_SHARD_PERIOD < index % EPOCHS_PER_SHARD_PERIOD: - pubkeys.append(pubkey) - balances.append(balance) - for pubkey, compact_validator in zip(later_committee.pubkeys, later_committee.compact_validators): - index, slashed, balance = unpack_compact_validator(compact_validator) - if epoch % EPOCHS_PER_SHARD_PERIOD >= index % EPOCHS_PER_SHARD_PERIOD: - pubkeys.append(pubkey) - balances.append(balance) - return pubkeys, balances -``` - -## Light client state updates - -The state of a light client is stored in a `memory` object of type `LightClientMemory`. To advance its state a light client requests an `update` object of type `LightClientUpdate` from the network by sending a request containing `(memory.shard, memory.header.slot, slot_range_end)` and calls `update_memory(memory, update)`. 
- -```python -def update_memory(memory: LightClientMemory, update: LightClientUpdate) -> None: - # Verify the update does not skip a period - current_period = compute_epoch_at_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD - next_epoch = compute_epoch_of_shard_slot(update.header.slot) - next_period = next_epoch // EPOCHS_PER_SHARD_PERIOD - assert next_period in (current_period, current_period + 1) - - # Verify update header against shard block root and header branch - assert is_valid_merkle_branch( - leaf=hash_tree_root(update.header), - branch=update.header_branch, - depth=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH, - index=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX, - root=update.shard_block_root, - ) - - # Verify persistent committee votes pass 2/3 threshold - pubkeys, balances = get_persistent_committee_pubkeys_and_balances(memory, next_epoch) - assert 3 * sum(filter(lambda i: update.aggregation_bits[i], balances)) > 2 * sum(balances) - - # Verify shard attestations - pubkeys = filter(lambda i: update.aggregation_bits[i], pubkeys) - domain = compute_domain(DOMAIN_SHARD_ATTESTER, update.fork_version) - signing_root = compute_signing_root(update.shard_block_root, domain) - assert bls.FastAggregateVerify(pubkeys, signing_root, update.signature) - - # Update period committees if entering a new period - if next_period == current_period + 1: - assert is_valid_merkle_branch( - leaf=hash_tree_root(update.committee), - branch=update.committee_branch, - depth=PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT), - index=PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX << log_2(SHARD_COUNT) + memory.shard, - root=hash_tree_root(update.header), - ) - memory.previous_committee = memory.current_committee - memory.current_committee = memory.next_committee - memory.next_committee = update.committee - - # Update header - memory.header = update.header -``` - -## Data overhead - -Once every `EPOCHS_PER_SHARD_PERIOD` epochs (~27 hours) a light client downloads a 
`LightClientUpdate` object: - -* `shard_block_root`: 32 bytes -* `fork_version`: 4 bytes -* `aggregation_bits`: 16 bytes -* `signature`: 96 bytes -* `header`: 8 + 32 + 32 + 32 + 96 = 200 bytes -* `header_branch`: 4 * 32 = 128 bytes -* `committee`: 128 * (48 + 8) = 7,168 bytes -* `committee_branch`: (5 + 10) * 32 = 480 bytes - -The total overhead is 8,124 bytes, or ~0.083 bytes per second. The Bitcoin SPV equivalent is 80 bytes per ~560 seconds, or ~0.143 bytes per second. Various compression optimisations (similar to [these](https://github.com/RCasatta/compressedheaders)) are possible. - -A light client can choose to update the header (without updating the committee) more frequently than once every `EPOCHS_PER_SHARD_PERIOD` epochs at a cost of 32 + 4 + 16 + 96 + 200 + 128 = 476 bytes per update. From 4068a887c58ad7512a648d2e6ce674cf610376b9 Mon Sep 17 00:00:00 2001 From: protolambda Date: Wed, 17 Mar 2021 23:44:16 +0100 Subject: [PATCH 092/127] split phase1 features --- specs/{phase1 => custody}/custody-game.md | 0 specs/{phase1 => custody}/validator.md | 0 specs/{phase1 => das}/das-internals.md | 0 specs/{phase1 => das}/das-p2p.md | 0 specs/{phase1 => das}/das-participation.md | 0 specs/{phase1 => das}/fork-choice.md | 4 +++- specs/{phase1 => sharding}/beacon-chain.md | 0 specs/{phase1 => sharding}/fork.md | 0 specs/{phase1 => sharding}/p2p-phase1.md | 0 9 files changed, 3 insertions(+), 1 deletion(-) rename specs/{phase1 => custody}/custody-game.md (100%) rename specs/{phase1 => custody}/validator.md (100%) rename specs/{phase1 => das}/das-internals.md (100%) rename specs/{phase1 => das}/das-p2p.md (100%) rename specs/{phase1 => das}/das-participation.md (100%) rename specs/{phase1 => das}/fork-choice.md (81%) rename specs/{phase1 => sharding}/beacon-chain.md (100%) rename specs/{phase1 => sharding}/fork.md (100%) rename specs/{phase1 => sharding}/p2p-phase1.md (100%) diff --git a/specs/phase1/custody-game.md b/specs/custody/custody-game.md similarity index 100% 
rename from specs/phase1/custody-game.md rename to specs/custody/custody-game.md diff --git a/specs/phase1/validator.md b/specs/custody/validator.md similarity index 100% rename from specs/phase1/validator.md rename to specs/custody/validator.md diff --git a/specs/phase1/das-internals.md b/specs/das/das-internals.md similarity index 100% rename from specs/phase1/das-internals.md rename to specs/das/das-internals.md diff --git a/specs/phase1/das-p2p.md b/specs/das/das-p2p.md similarity index 100% rename from specs/phase1/das-p2p.md rename to specs/das/das-p2p.md diff --git a/specs/phase1/das-participation.md b/specs/das/das-participation.md similarity index 100% rename from specs/phase1/das-participation.md rename to specs/das/das-participation.md diff --git a/specs/phase1/fork-choice.md b/specs/das/fork-choice.md similarity index 81% rename from specs/phase1/fork-choice.md rename to specs/das/fork-choice.md index e9c9f4f02..188de244d 100644 --- a/specs/phase1/fork-choice.md +++ b/specs/das/fork-choice.md @@ -15,7 +15,9 @@ ## Introduction -This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1. The only change that we add from phase 0 is that we add a concept of "data dependencies"; a block is only eligible for consideration in the fork choice after a data availability test has been successfully completed for all dependencies. The "root" of a shard block for data dependency purposes is considered to be a DataCommitment object, which is a pair of a Kate commitment and a length. +This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1. The only change that we add from phase 0 is that we add a concept of "data dependencies"; +a block is only eligible for consideration in the fork choice after a data availability test has been successfully completed for all dependencies. 
+The "root" of a shard block for data dependency purposes is considered to be a DataCommitment object, which is a pair of a Kate commitment and a length. ## Dependency calculation diff --git a/specs/phase1/beacon-chain.md b/specs/sharding/beacon-chain.md similarity index 100% rename from specs/phase1/beacon-chain.md rename to specs/sharding/beacon-chain.md diff --git a/specs/phase1/fork.md b/specs/sharding/fork.md similarity index 100% rename from specs/phase1/fork.md rename to specs/sharding/fork.md diff --git a/specs/phase1/p2p-phase1.md b/specs/sharding/p2p-phase1.md similarity index 100% rename from specs/phase1/p2p-phase1.md rename to specs/sharding/p2p-phase1.md From 882aa81ee93787bdbc14e8319b8a4283aed739f6 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 00:01:46 +0100 Subject: [PATCH 093/127] cleanup --- specs/custody/custody-game.md | 86 +++++++++++++++----------- specs/custody/validator.md | 47 +------------- specs/sharding/fork.md | 111 ---------------------------------- 3 files changed, 53 insertions(+), 191 deletions(-) delete mode 100644 specs/sharding/fork.md diff --git a/specs/custody/custody-game.md b/specs/custody/custody-game.md index 136116c2f..c371e698c 100644 --- a/specs/custody/custody-game.md +++ b/specs/custody/custody-game.md @@ -4,46 +4,15 @@ ## Table of contents + -- [Introduction](#introduction) -- [Constants](#constants) - - [Misc](#misc) -- [Configuration](#configuration) - - [Time parameters](#time-parameters) - - [Max operations per block](#max-operations-per-block) - - [Reward and penalty quotients](#reward-and-penalty-quotients) -- [Data structures](#data-structures) - - [New Beacon Chain operations](#new-beacon-chain-operations) - - [`CustodyChunkChallenge`](#custodychunkchallenge) - - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord) - - [`CustodyChunkResponse`](#custodychunkresponse) - - [`CustodySlashing`](#custodyslashing) - - [`SignedCustodySlashing`](#signedcustodyslashing) - - 
[`CustodyKeyReveal`](#custodykeyreveal) - - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal) -- [Helpers](#helpers) - - [`replace_empty_or_append`](#replace_empty_or_append) - - [`legendre_bit`](#legendre_bit) - - [`get_custody_atoms`](#get_custody_atoms) - - [`get_custody_secrets`](#get_custody_secrets) - - [`universal_hash_function`](#universal_hash_function) - - [`compute_custody_bit`](#compute_custody_bit) - - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) - - [`get_custody_period_for_validator`](#get_custody_period_for_validator) -- [Per-block processing](#per-block-processing) - - [Custody Game Operations](#custody-game-operations) - - [Chunk challenges](#chunk-challenges) - - [Custody chunk response](#custody-chunk-response) - - [Custody key reveals](#custody-key-reveals) - - [Early derived secret reveals](#early-derived-secret-reveals) - - [Custody Slashings](#custody-slashings) -- [Per-epoch processing](#per-epoch-processing) - - [Handling of reveal deadlines](#handling-of-reveal-deadlines) - - [Final updates](#final-updates) +TODO + + ## Introduction @@ -83,6 +52,14 @@ This document details the beacon chain additions and changes in Phase 1 of Ether | `MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES` | `uint64(2**4)` (= 16) | | `MAX_CUSTODY_SLASHINGS` | `uint64(2**0)` (= 1) | + +### Size parameters + +| Name | Value | Unit | +| - | - | - | +| `BYTES_PER_CUSTODY_CHUNK` | `uint64(2**12)` (= 4,096) | bytes | +| `CUSTODY_RESPONSE_DEPTH` | `ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)` | - | + ### Reward and penalty quotients | Name | Value | @@ -92,6 +69,45 @@ This document details the beacon chain additions and changes in Phase 1 of Ether ## Data structures +### Extended types + +#### `Validator` + +```python +class Validator(phase0.Validator): + # next_custody_secret_to_reveal is initialised to the custody period + # (of the particular validator) in which the validator is activated + # = 
get_custody_period_for_validator(...) + next_custody_secret_to_reveal: uint64 + # TODO: The max_reveal_lateness doesn't really make sense anymore. + # So how do we incentivise early custody key reveals now? + all_custody_secrets_revealed_epoch: Epoch # to be initialized to FAR_FUTURE_EPOCH +``` + +#### `BeaconBlockBody` + +```python +class BeaconBlockBody(phase0.BeaconBlockBody): + # Custody game + chunk_challenges: List[CustodyChunkChallenge, MAX_CUSTODY_CHUNK_CHALLENGES] + chunk_challenge_responses: List[CustodyChunkResponse, MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES] + custody_key_reveals: List[CustodyKeyReveal, MAX_CUSTODY_KEY_REVEALS] + early_derived_secret_reveals: List[EarlyDerivedSecretReveal, MAX_EARLY_DERIVED_SECRET_REVEALS] + custody_slashings: List[SignedCustodySlashing, MAX_CUSTODY_SLASHINGS] +``` + +#### `BeaconState` + +```python +class BeaconState(phase0.BeaconState): + # Future derived secrets already exposed; contains the indices of the exposed validator + # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS + exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH], + EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] + custody_chunk_challenge_records: List[CustodyChunkChallengeRecord, MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS] + custody_chunk_challenge_index: uint64 +``` + ### New Beacon Chain operations #### `CustodyChunkChallenge` diff --git a/specs/custody/validator.md b/specs/custody/validator.md index c6d29e725..5e4696274 100644 --- a/specs/custody/validator.md +++ b/specs/custody/validator.md @@ -10,55 +10,12 @@ participating in the Ethereum 2.0 Phase 1 protocol. 
-- [Introduction](#introduction) -- [Prerequisites](#prerequisites) -- [Constants](#constants) - - [Misc](#misc) -- [Becoming a validator](#becoming-a-validator) -- [Beacon chain validator assignments](#beacon-chain-validator-assignments) - - [Lookahead](#lookahead) -- [Beacon chain responsibilities](#beacon-chain-responsibilities) - - [Block proposal](#block-proposal) - - [Preparing for a `BeaconBlock`](#preparing-for-a-beaconblock) - - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - - [Custody slashings](#custody-slashings) - - [Custody key reveals](#custody-key-reveals) - - [Early derived secret reveals](#early-derived-secret-reveals) - - [Shard transitions](#shard-transitions) - - [Light client fields](#light-client-fields) - - [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock) - - [Attesting](#attesting) - - [`FullAttestationData`](#fullattestationdata) - - [`FullAttestation`](#fullattestation) - - [Timing](#timing) - - [Attestation data](#attestation-data) - - [Shard head root](#shard-head-root) - - [Shard transition](#shard-transition) - - [Construct attestation](#construct-attestation) - - [Attestation Aggregation](#attestation-aggregation) - - [Broadcast aggregate](#broadcast-aggregate) - - [`AggregateAndProof`](#aggregateandproof) - - [`SignedAggregateAndProof`](#signedaggregateandproof) - - [Light client committee](#light-client-committee) - - [Preparation](#preparation) - - [Light client vote](#light-client-vote) - - [Light client vote data](#light-client-vote-data) - - [`LightClientVoteData`](#lightclientvotedata) - - [Construct vote](#construct-vote) - - [`LightClientVote`](#lightclientvote) - - [Broadcast](#broadcast) - - [Light client vote aggregation](#light-client-vote-aggregation) - - [Aggregation selection](#aggregation-selection) - - [Construct aggregate](#construct-aggregate) - - [Broadcast aggregate](#broadcast-aggregate-1) - - [`LightAggregateAndProof`](#lightaggregateandproof) - - 
[`SignedLightAggregateAndProof`](#signedlightaggregateandproof) -- [How to avoid slashing](#how-to-avoid-slashing) - - [Custody slashing](#custody-slashing) +TODO + ## Introduction ## Prerequisites diff --git a/specs/sharding/fork.md b/specs/sharding/fork.md deleted file mode 100644 index d81ca64b3..000000000 --- a/specs/sharding/fork.md +++ /dev/null @@ -1,111 +0,0 @@ -# Ethereum 2.0 Phase 1 -- From Phase 0 to Phase 1 - -**Notice**: This document is a work-in-progress for researchers and implementers. - -## Table of contents - - - - -- [Introduction](#introduction) -- [Configuration](#configuration) -- [Fork to Phase 1](#fork-to-phase-1) - - [Fork trigger](#fork-trigger) - - [Upgrading the state](#upgrading-the-state) - - - -## Introduction - -This document describes the process of moving from Phase 0 to Phase 1 of Ethereum 2.0. - -## Configuration - -Warning: this configuration is not definitive. - -| Name | Value | -| - | - | -| `PHASE_1_FORK_VERSION` | `Version('0x01000000')` | -| `PHASE_1_FORK_SLOT` | `Slot(0)` **TBD** | - -## Fork to Phase 1 - -### Fork trigger - -TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at slot `PHASE_1_FORK_SLOT`, where `PHASE_1_FORK_SLOT % SLOTS_PER_EPOCH == 0`. - -### Upgrading the state - -After `process_slots` of Phase 0 finishes, if `state.slot == PHASE_1_FORK_SLOT`, an irregular state change is made to upgrade to Phase 1. 
- -```python -def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState: - epoch = get_current_epoch(pre) - post = BeaconState( - genesis_time=pre.genesis_time, - slot=pre.slot, - fork=Fork( - previous_version=pre.fork.current_version, - current_version=PHASE_1_FORK_VERSION, - epoch=epoch, - ), - # History - latest_block_header=pre.latest_block_header, - block_roots=pre.block_roots, - state_roots=pre.state_roots, - historical_roots=pre.historical_roots, - # Eth1 - eth1_data=pre.eth1_data, - eth1_data_votes=pre.eth1_data_votes, - eth1_deposit_index=pre.eth1_deposit_index, - # Registry - validators=List[Validator, VALIDATOR_REGISTRY_LIMIT]( - Validator( - pubkey=phase0_validator.pubkey, - withdrawal_credentials=phase0_validator.withdrawal_credentials, - effective_balance=phase0_validator.effective_balance, - slashed=phase0_validator.slashed, - activation_eligibility_epoch=phase0_validator.activation_eligibility_epoch, - activation_epoch=phase0_validator.activation_eligibility_epoch, - exit_epoch=phase0_validator.exit_epoch, - withdrawable_epoch=phase0_validator.withdrawable_epoch, - next_custody_secret_to_reveal=get_custody_period_for_validator(ValidatorIndex(i), epoch), - all_custody_secrets_revealed_epoch=FAR_FUTURE_EPOCH, - ) for i, phase0_validator in enumerate(pre.validators) - ), - balances=pre.balances, - # Randomness - randao_mixes=pre.randao_mixes, - # Slashings - slashings=pre.slashings, - # Attestations - # previous_epoch_attestations is cleared on upgrade. - previous_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](), - # empty in pre state, since the upgrade is performed just after an epoch boundary. 
- current_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](), - # Finality - justification_bits=pre.justification_bits, - previous_justified_checkpoint=pre.previous_justified_checkpoint, - current_justified_checkpoint=pre.current_justified_checkpoint, - finalized_checkpoint=pre.finalized_checkpoint, - # Phase 1 - current_epoch_start_shard=Shard(0), - shard_states=List[ShardState, MAX_SHARDS]( - ShardState( - slot=compute_previous_slot(pre.slot), - gasprice=MIN_GASPRICE, - latest_block_root=Root(), - ) for i in range(INITIAL_ACTIVE_SHARDS) - ), - online_countdown=[ONLINE_PERIOD] * len(pre.validators), # all online - current_light_committee=CompactCommittee(), # computed after state creation - next_light_committee=CompactCommittee(), - # Custody game - exposed_derived_secrets=[()] * EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS, - # exposed_derived_secrets will fully default to zeroes - ) - next_epoch = Epoch(epoch + 1) - post.current_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, epoch)) - post.next_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, next_epoch)) - return post -``` From 6d2d8cb982529c69f489878839ddfe790d53fffb Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 00:07:15 +0100 Subject: [PATCH 094/127] toc updates --- Makefile | 6 ++++- specs/custody/custody-game.md | 41 +++++++++++++++++++++++++++++++++- specs/custody/validator.md | 11 ++++++++- specs/das/das-internals.md | 3 +++ specs/das/das-p2p.md | 3 +++ specs/das/das-participation.md | 3 ++- specs/das/fork-choice.md | 2 ++ specs/sharding/beacon-chain.md | 3 +++ specs/sharding/p2p-phase1.md | 3 +++ 9 files changed, 71 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index d1ebe0fec..d4e7c1258 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,11 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER # To check generator matching: 
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}]) -MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phase1/*.md) $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md) +MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SSZ_DIR)/*.md) \ + $(wildcard $(SPEC_DIR)/merge/*.md) \ + $(wildcard $(SPEC_DIR)/custody/*.md) \ + $(wildcard $(SPEC_DIR)/das/*.md) \ + $(wildcard $(SPEC_DIR)/sharding/*.md) COV_HTML_OUT=.htmlcov COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html diff --git a/specs/custody/custody-game.md b/specs/custody/custody-game.md index c371e698c..d7f0f8c2a 100644 --- a/specs/custody/custody-game.md +++ b/specs/custody/custody-game.md @@ -8,7 +8,46 @@ -TODO +- [Introduction](#introduction) +- [Constants](#constants) + - [Misc](#misc) +- [Configuration](#configuration) + - [Time parameters](#time-parameters) + - [Max operations per block](#max-operations-per-block) + - [Size parameters](#size-parameters) + - [Reward and penalty quotients](#reward-and-penalty-quotients) +- [Data structures](#data-structures) + - [Extended types](#extended-types) + - [`Validator`](#validator) + - [`BeaconBlockBody`](#beaconblockbody) + - [`BeaconState`](#beaconstate) + - [New Beacon Chain operations](#new-beacon-chain-operations) + - [`CustodyChunkChallenge`](#custodychunkchallenge) + - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord) + - [`CustodyChunkResponse`](#custodychunkresponse) + - [`CustodySlashing`](#custodyslashing) + - [`SignedCustodySlashing`](#signedcustodyslashing) + - [`CustodyKeyReveal`](#custodykeyreveal) + - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal) +- [Helpers](#helpers) + - [`replace_empty_or_append`](#replace_empty_or_append) + - [`legendre_bit`](#legendre_bit) + - [`get_custody_atoms`](#get_custody_atoms) + - [`get_custody_secrets`](#get_custody_secrets) + 
- [`universal_hash_function`](#universal_hash_function) + - [`compute_custody_bit`](#compute_custody_bit) + - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) + - [`get_custody_period_for_validator`](#get_custody_period_for_validator) +- [Per-block processing](#per-block-processing) + - [Custody Game Operations](#custody-game-operations) + - [Chunk challenges](#chunk-challenges) + - [Custody chunk response](#custody-chunk-response) + - [Custody key reveals](#custody-key-reveals) + - [Early derived secret reveals](#early-derived-secret-reveals) + - [Custody Slashings](#custody-slashings) +- [Per-epoch processing](#per-epoch-processing) + - [Handling of reveal deadlines](#handling-of-reveal-deadlines) + - [Final updates](#final-updates) diff --git a/specs/custody/validator.md b/specs/custody/validator.md index 5e4696274..f4f497d7b 100644 --- a/specs/custody/validator.md +++ b/specs/custody/validator.md @@ -10,7 +10,16 @@ participating in the Ethereum 2.0 Phase 1 protocol. 
-TODO +- [Introduction](#introduction) +- [Prerequisites](#prerequisites) +- [Becoming a validator](#becoming-a-validator) +- [Beacon chain validator assignments](#beacon-chain-validator-assignments) + - [Custody slashings](#custody-slashings) + - [Custody key reveals](#custody-key-reveals) + - [Early derived secret reveals](#early-derived-secret-reveals) + - [Construct attestation](#construct-attestation) +- [How to avoid slashing](#how-to-avoid-slashing) + - [Custody slashing](#custody-slashing) diff --git a/specs/das/das-internals.md b/specs/das/das-internals.md index fad656e29..1b65eeacd 100644 --- a/specs/das/das-internals.md +++ b/specs/das/das-internals.md @@ -4,6 +4,7 @@ ## Table of contents + @@ -23,6 +24,8 @@ - [DAS functions](#das-functions) + + ## Custom types diff --git a/specs/das/das-p2p.md b/specs/das/das-p2p.md index e1579576d..33c4f2726 100644 --- a/specs/das/das-p2p.md +++ b/specs/das/das-p2p.md @@ -4,6 +4,7 @@ ## Table of contents + @@ -25,6 +26,8 @@ - [DASQuery](#dasquery) + + ## Introduction diff --git a/specs/das/das-participation.md b/specs/das/das-participation.md index 63ceb240a..fd432ea8e 100644 --- a/specs/das/das-participation.md +++ b/specs/das/das-participation.md @@ -4,7 +4,7 @@ ## Table of contents - + @@ -20,6 +20,7 @@ - [Stage 2: Pulling missing data from validators with custody.](#stage-2-pulling-missing-data-from-validators-with-custody) + ## Data Availability Sampling diff --git a/specs/das/fork-choice.md b/specs/das/fork-choice.md index 188de244d..4512e5b68 100644 --- a/specs/das/fork-choice.md +++ b/specs/das/fork-choice.md @@ -3,6 +3,7 @@ **Notice**: This document is a work-in-progress for researchers and implementers. ## Table of contents + @@ -13,6 +14,7 @@ + ## Introduction This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1. 
The only change that we add from phase 0 is that we add a concept of "data dependencies"; diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 782db226f..ca3b5d5fd 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -4,6 +4,7 @@ ## Table of contents + @@ -51,6 +52,8 @@ - [Custody game updates](#custody-game-updates) + + ## Introduction diff --git a/specs/sharding/p2p-phase1.md b/specs/sharding/p2p-phase1.md index c17bea36f..727ddbeb7 100644 --- a/specs/sharding/p2p-phase1.md +++ b/specs/sharding/p2p-phase1.md @@ -4,6 +4,7 @@ ## Table of contents + @@ -14,6 +15,8 @@ - [Shard header: `shard_header`](#shard-header-shard_header) + + ## Introduction From c748c1d7d564e2dbed52f3933ed249bcdd0fe63f Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 00:20:53 +0100 Subject: [PATCH 095/127] cleanup sharding docs --- specs/sharding/beacon-chain.md | 2 +- specs/sharding/p2p-phase1.md | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index ca3b5d5fd..4b0ec4db3 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- The Beacon Chain with Shards +# Ethereum 2.0 Sharding -- Beacon Chain changes **Notice**: This document is a work-in-progress for researchers and implementers. diff --git a/specs/sharding/p2p-phase1.md b/specs/sharding/p2p-phase1.md index 727ddbeb7..f6e4ced37 100644 --- a/specs/sharding/p2p-phase1.md +++ b/specs/sharding/p2p-phase1.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Network specification +# Ethereum 2.0 Sharding -- Network specification **Notice**: This document is a work-in-progress for researchers and implementers. 
@@ -9,7 +9,7 @@ - [Introduction](#introduction) -- [DAS in the Gossip domain: Push](#das-in-the-gossip-domain-push) +- [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - [Shard blobs: `shard_blob_{shard}`](#shard-blobs-shard_blob_shard) - [Shard header: `shard_header`](#shard-header-shard_header) @@ -23,14 +23,14 @@ With Phase 1, shard data is introduced, which requires various new additions and adjustments to the groundwork that Phase 0 implements. The specification of these changes continues in the same format, and assumes Phase0 as pre-requisite. The Phase 0 adjustments and additions for Shards are outlined in this document. -See the [Data Availability Sampling network specification](./das-p2p.md) for Phase 1 networking specific to Data availability. -## DAS in the Gossip domain: Push +## Gossip domain ### Topics and messages Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are: + | Name | Message Type | |----------------------------------|---------------------------| | `shard_blob_{shard}` | `SignedShardBlob` | @@ -42,8 +42,6 @@ The [DAS network specification](./das-p2p.md) defines additional topics. Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{shard}` subnets. -The [DAS networking specification](./das-p2p.md#horizontal-subnets) outlines an extension of the regular behavior on this topic. - The following validations MUST pass before forwarding the `signed_blob` (with inner `blob`) on the horizontal subnet or creating samples for it. - _[REJECT]_ `blob.shard` MUST match the topic `{shard}` parameter. 
(And thus within valid shard index range) - _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- From 1acb1d6f70f981a339aff154e867f0bd8faa848c Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 00:21:09 +0100 Subject: [PATCH 096/127] disable phase1 pyspec build --- setup.py | 62 -------------------------------------------------------- 1 file changed, 62 deletions(-) diff --git a/setup.py b/setup.py index 9de8ef911..91baf978f 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,6 @@ FUNCTION_REGEX = r'^def [\w_]*' # Definitions in context.py PHASE0 = 'phase0' ALTAIR = 'altair' -PHASE1 = 'phase1' class SpecObject(NamedTuple): @@ -141,40 +140,7 @@ SSZObject = TypeVar('SSZObject', bound=View) CONFIG_NAME = 'mainnet' ''' -PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0 -from eth2spec.config.config_util import apply_constants_config -from typing import ( - Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional -) -from typing import List as PyList -from dataclasses import ( - dataclass, - field, -) - -from lru import LRU - -from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes -from eth2spec.utils.ssz.ssz_typing import ( - View, boolean, Container, List, Vector, uint8, uint32, uint64, bit, - ByteList, ByteVector, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector, -) -from eth2spec.utils import bls - -from eth2spec.utils.hash_function import hash - -# Whenever phase 1 is loaded, make sure we have the latest phase0 -from importlib import reload -reload(phase0) - - -SSZVariableName = str -GeneralizedIndex = NewType('GeneralizedIndex', int) -SSZObject = TypeVar('SSZObject', bound=View) - -CONFIG_NAME = 'mainnet' -''' ALTAIR_IMPORTS = '''from eth2spec.phase0 import spec as phase0 from eth2spec.config.config_util import apply_constants_config from typing import ( @@ -294,14 +260,6 @@ get_attesting_indices = cache_this( _get_attesting_indices, 
lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)''' -PHASE1_SUNDRY_FUNCTIONS = ''' - -_get_start_shard = get_start_shard -get_start_shard = cache_this( - lambda state, slot: (state.validators.hash_tree_root(), slot), - _get_start_shard, lru_size=SLOTS_PER_EPOCH * 3)''' - - ALTAIR_SUNDRY_FUNCTIONS = ''' def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex: @@ -327,10 +285,6 @@ def is_altair(fork): return fork == ALTAIR -def is_phase1(fork): - return fork == PHASE1 - - def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_class_objects: Dict[str, str]) -> str: """ Given all the objects that constitute a spec, combine them into a single pyfile. @@ -370,7 +324,6 @@ def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_cl # Functions to make pyspec work + '\n' + PHASE0_SUNDRY_FUNCTIONS + ('\n' + ALTAIR_SUNDRY_FUNCTIONS if is_altair(fork) else '') - + ('\n' + PHASE1_SUNDRY_FUNCTIONS if is_phase1(fork) else '') ) # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are @@ -461,7 +414,6 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject: fork_imports = { 'phase0': PHASE0_IMPORTS, - 'phase1': PHASE1_IMPORTS, 'altair': ALTAIR_IMPORTS, } @@ -515,20 +467,6 @@ class PySpecCommand(Command): specs/phase0/validator.md specs/phase0/weak-subjectivity.md """ - elif is_phase1(self.spec_fork): - self.md_doc_paths = """ - specs/phase0/beacon-chain.md - specs/phase0/fork-choice.md - specs/phase0/validator.md - specs/phase0/weak-subjectivity.md - specs/phase1/custody-game.md - specs/phase1/beacon-chain.md - specs/phase1/shard-transition.md - specs/phase1/fork-choice.md - specs/phase1/fork.md - specs/phase1/shard-fork-choice.md - specs/phase1/validator.md - """ elif is_altair(self.spec_fork): self.md_doc_paths = """ specs/phase0/beacon-chain.md From 306fc95c60a29dd9817ad3267e9217f5cfad1368 Mon 
Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 00:33:07 +0100 Subject: [PATCH 097/127] Update doc names and sharding readme section --- README.md | 21 +++++++++++++++---- specs/das/{das-internals.md => das-core.md} | 2 +- specs/das/fork-choice.md | 2 +- specs/das/{das-p2p.md => p2p-interface.md} | 10 ++++----- .../das/{das-participation.md => sampling.md} | 2 +- .../{p2p-phase1.md => p2p-interface.md} | 0 6 files changed, 25 insertions(+), 12 deletions(-) rename specs/das/{das-internals.md => das-core.md} (99%) rename specs/das/{das-p2p.md => p2p-interface.md} (96%) rename specs/das/{das-participation.md => sampling.md} (98%) rename specs/sharding/{p2p-phase1.md => p2p-interface.md} (100%) diff --git a/README.md b/README.md index 42ad7e71f..1bae65f96 100644 --- a/README.md +++ b/README.md @@ -27,14 +27,27 @@ Core specifications for Eth2 clients be found in [specs](specs/). These are divi * [Altair fork](specs/altair/fork.md) * [Light client sync protocol](specs/altair/sync-protocol.md) -### Sharding - -The sharding spec is still actively in R&D; see the most recent available pull request [here](https://github.com/ethereum/eth2.0-specs/pull/2146) and some technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD). - ### Merge The merge is still actively in R&D; see an [ethresear.ch](https://ethresear.ch) post describing the proposed basic mechanism [here](https://ethresear.ch/t/the-eth1-eth2-transition/6265) and the section of [ethereum.org](https://ethereum.org) describing the merge at a high level [here](https://ethereum.org/en/eth2/docking/). 
+### Sharding + +Sharding follows the merge, and is divided into three parts: + +* Sharding base functionality + * [Beacon Chain changes](specs/sharding/beacon-chain.md) + * [P2P Network changes](specs/sharding/p2p-interface.md) +* Proof of Custody + * [Custody Game](specs/custody/custody-game.md) + * [Validator custody work](specs/custody/validator.md) +* Data Availability Sampling + * Technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD). + * [Core types and functions](specs/das/das-core.md) + * [P2P Networking](specs/das/p2p-interface.md) + * [Fork Choice](specs/das/fork-choice.md) + * [Sampling process](specs/das/sampling.md) + ### Accompanying documents can be found in [specs](specs) and include: * [SimpleSerialize (SSZ) spec](ssz/simple-serialize.md) diff --git a/specs/das/das-internals.md b/specs/das/das-core.md similarity index 99% rename from specs/das/das-internals.md rename to specs/das/das-core.md index 1b65eeacd..5cfb9af1c 100644 --- a/specs/das/das-internals.md +++ b/specs/das/das-core.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Data Availability Sampling - Internals +# Ethereum 2.0 Data Availability Sampling Core **Notice**: This document is a work-in-progress for researchers and implementers. diff --git a/specs/das/fork-choice.md b/specs/das/fork-choice.md index 4512e5b68..ae105c8ef 100644 --- a/specs/das/fork-choice.md +++ b/specs/das/fork-choice.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Beacon Chain Fork Choice +# Ethereum 2.0 Data Availability Sampling Fork Choice **Notice**: This document is a work-in-progress for researchers and implementers. 
diff --git a/specs/das/das-p2p.md b/specs/das/p2p-interface.md similarity index 96% rename from specs/das/das-p2p.md rename to specs/das/p2p-interface.md index 33c4f2726..0c8c211d4 100644 --- a/specs/das/das-p2p.md +++ b/specs/das/p2p-interface.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Data Availability Sampling - Network specification +# Ethereum 2.0 Data Availability Sampling Network specification **Notice**: This document is a work-in-progress for researchers and implementers. @@ -32,7 +32,7 @@ ## Introduction -For an introduction about DAS itself, see [the DAS participation spec](./das-participation.md#data-availability-sampling). +For an introduction about DAS itself, see [the DAS participation spec](sampling.md#data-availability-sampling). This is not a pre-requisite for the network layer, but will give you valuable context. For sampling, all nodes need to query for `k` random samples each slot. @@ -132,11 +132,11 @@ These subscriptions rotate slowly, and with different offsets per node identity # TODO hash function: (node, time)->subnets ``` -Backbone subscription work is outlined in the [DAS participation spec](./das-participation.md#slow-rotation-backbone) +Backbone subscription work is outlined in the [DAS participation spec](sampling.md#slow-rotation-backbone) #### Quick Rotation: Sampling -A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS participation spec](./das-participation.md#quick-rotation-sampling). +A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS participation spec](sampling.md#quick-rotation-sampling). If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore, and if necessary in the DHT, for peers in the topic backbone. ## DAS in the Gossip domain: Push @@ -161,7 +161,7 @@ Take `blob = signed_blob.blob`: 2. 
Create samples with proofs: `samples = sample_data(blob.slot, blob.shard, extended_data)` 3. Fanout-publish the samples to the vertical subnets of its peers (not all vertical subnets may be reached). -The [DAS participation spec](./das-participation.md#horizontal-subnets) outlines when and where to participate in DAS on horizontal subnets. +The [DAS participation spec](sampling.md#horizontal-subnets) outlines when and where to participate in DAS on horizontal subnets. #### Vertical subnets: `das_sample_{subnet_index}` diff --git a/specs/das/das-participation.md b/specs/das/sampling.md similarity index 98% rename from specs/das/das-participation.md rename to specs/das/sampling.md index fd432ea8e..aedcf5fd5 100644 --- a/specs/das/das-participation.md +++ b/specs/das/sampling.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Data Availability Sampling - Participation +# Ethereum 2.0 Data Availability Sampling **Notice**: This document is a work-in-progress for researchers and implementers. diff --git a/specs/sharding/p2p-phase1.md b/specs/sharding/p2p-interface.md similarity index 100% rename from specs/sharding/p2p-phase1.md rename to specs/sharding/p2p-interface.md From 0b8e3aee440617aad6758b953833603f323e308f Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 00:38:18 +0100 Subject: [PATCH 098/127] move shard blob types from das to sharding spec --- specs/das/das-core.md | 30 ------------------------------ specs/sharding/p2p-interface.md | 31 +++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/specs/das/das-core.md b/specs/das/das-core.md index 5cfb9af1c..71bb5d8ec 100644 --- a/specs/das/das-core.md +++ b/specs/das/das-core.md @@ -13,8 +13,6 @@ - [Misc](#misc) - [New containers](#new-containers) - [DASSample](#dassample) - - [ShardBlob](#shardblob) - - [SignedShardBlob](#signedshardblob) - [Helper functions](#helper-functions) - [Reverse bit ordering](#reverse-bit-ordering) - 
[`reverse_bit_order`](#reverse_bit_order) @@ -59,34 +57,6 @@ class DASSample(Container): data: Vector[BLSPoint, POINTS_PER_SAMPLE] ``` -### ShardBlob - -The blob of data, effectively a block. Network-only. - -```python -class ShardBlob(Container): - # Slot and shard that this blob is intended for - slot: Slot - shard: Shard - # The actual data - data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] -``` - -Note that the hash-tree-root of the `ShardBlob` does not match the `ShardHeader`, -since the blob deals with full data, whereas the header includes the KZG commitment instead. - -### SignedShardBlob - -Network-only. - -```python -class SignedShardBlob(Container): - blob: ShardBlob - # The signature, the message is the commitment on the blob - signature: BLSSignature -``` - - ## Helper functions ### Reverse bit ordering diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index f6e4ced37..86c836c38 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -9,6 +9,9 @@ - [Introduction](#introduction) +- [New containers](#new-containers) + - [ShardBlob](#shardblob) + - [SignedShardBlob](#signedshardblob) - [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - [Shard blobs: `shard_blob_{shard}`](#shard-blobs-shard_blob_shard) @@ -24,6 +27,34 @@ With Phase 1, shard data is introduced, which requires various new additions and The specification of these changes continues in the same format, and assumes Phase0 as pre-requisite. The Phase 0 adjustments and additions for Shards are outlined in this document. +## New containers + +### ShardBlob + +The blob of data, effectively a block. Network-only. 
+ +```python +class ShardBlob(Container): + # Slot and shard that this blob is intended for + slot: Slot + shard: Shard + # The actual data + data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] +``` + +Note that the hash-tree-root of the `ShardBlob` does not match the `ShardHeader`, +since the blob deals with full data, whereas the header includes the KZG commitment instead. + +### SignedShardBlob + +Network-only. + +```python +class SignedShardBlob(Container): + blob: ShardBlob + # The signature, the message is the commitment on the blob + signature: BLSSignature +``` ## Gossip domain From 8542d349bf6ad95df77291c460c72ffd76700a48 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 00:53:55 +0100 Subject: [PATCH 099/127] update sharding p2p doc --- specs/sharding/p2p-interface.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 86c836c38..7e8c40dcf 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -23,27 +23,27 @@ ## Introduction -With Phase 1, shard data is introduced, which requires various new additions and adjustments to the groundwork that Phase 0 implements. -The specification of these changes continues in the same format, and assumes Phase0 as pre-requisite. -The Phase 0 adjustments and additions for Shards are outlined in this document. +The specification of these changes continues in the same format as the [Phase0](../phase0/p2p-interface.md) and +[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite. +The adjustments and additions for Shards are outlined in this document. ## New containers ### ShardBlob -The blob of data, effectively a block. Network-only. +Network-only. ```python class ShardBlob(Container): # Slot and shard that this blob is intended for slot: Slot shard: Shard - # The actual data + # The actual data. 
Represented in header as data commitment and degree proof data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK] ``` Note that the hash-tree-root of the `ShardBlob` does not match the `ShardHeader`, -since the blob deals with full data, whereas the header includes the KZG commitment instead. +since the blob deals with full data, whereas the header includes the KZG commitment and degree proof instead. ### SignedShardBlob @@ -51,7 +51,7 @@ Network-only. ```python class SignedShardBlob(Container): - blob: ShardBlob + message: ShardBlob # The signature, the message is the commitment on the blob signature: BLSSignature ``` From f618f3c59d4bc60913c775d8fb85244988d3117f Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 01:06:37 +0100 Subject: [PATCH 100/127] move custody-specific operations to custody spec --- specs/custody/custody-game.md | 49 ++++++++++++++++++++++++++++++++++ specs/sharding/beacon-chain.md | 28 +++++++++---------- 2 files changed, 61 insertions(+), 16 deletions(-) diff --git a/specs/custody/custody-game.md b/specs/custody/custody-game.md index d7f0f8c2a..95bdf4b7e 100644 --- a/specs/custody/custody-game.md +++ b/specs/custody/custody-game.md @@ -39,6 +39,7 @@ - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) - [`get_custody_period_for_validator`](#get_custody_period_for_validator) - [Per-block processing](#per-block-processing) + - [Block processing](#block-processing) - [Custody Game Operations](#custody-game-operations) - [Chunk challenges](#chunk-challenges) - [Custody chunk response](#custody-chunk-response) @@ -46,6 +47,7 @@ - [Early derived secret reveals](#early-derived-secret-reveals) - [Custody Slashings](#custody-slashings) - [Per-epoch processing](#per-epoch-processing) + - [Epoch transition](#epoch-transition) - [Handling of reveal deadlines](#handling-of-reveal-deadlines) - [Final updates](#final-updates) @@ -348,6 +350,18 @@ def get_custody_period_for_validator(validator_index: 
ValidatorIndex, epoch: Epo ## Per-block processing +### Block processing + +```python +def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_light_client_aggregate(state, block.body) + process_operations(state, block.body) + process_custody_game_operations(state, block.body) +``` + ### Custody Game Operations ```python @@ -605,6 +619,41 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed ## Per-epoch processing +### Epoch transition + +This epoch transition overrides the phase0 epoch transition: + +```python +def process_epoch(state: BeaconState) -> None: + process_justification_and_finalization(state) + process_rewards_and_penalties(state) + process_registry_updates(state) + + # Proof of custody + process_reveal_deadlines(state) + process_challenge_deadlines(state) + + process_slashings(state) + + # Sharding + process_pending_headers(state) + charge_confirmed_header_fees(state) + reset_pending_headers(state) + + # Final updates + # Phase 0 + process_eth1_data_reset(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_roots_update(state) + process_participation_record_updates(state) + # Proof of custody + process_custody_final_updates(state) + + process_shard_epoch_increment(state) +``` + ### Handling of reveal deadlines ```python diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 4b0ec4db3..b578570a2 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -49,7 +49,7 @@ - [`process_shard_header`](#process_shard_header) - [Epoch transition](#epoch-transition) - [Pending headers](#pending-headers) - - [Custody game updates](#custody-game-updates) + - [Shard epoch increment](#shard-epoch-increment) @@ -125,7 +125,7 @@ We define the following Python custom 
types for type hinting and readability: ## Updated containers -The following containers have updated definitions in Phase 1. +The following containers have updated definitions to support Sharding. ### `AttestationData` @@ -167,7 +167,8 @@ class BeaconState(phase0.BeaconState): ## New containers -The following containers are new in Phase 1. +The shard data itself is network-layer only, and can be found in the [P2P specification](./p2p-interface.md). +The beacon chain registers just the commitments of the shard data. ### `DataCommitment` @@ -435,9 +436,6 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) - - # See custody game spec. - process_custody_game_operations(state, body) ``` ### New Attestation processing @@ -550,10 +548,6 @@ def process_epoch(state: BeaconState) -> None: process_rewards_and_penalties(state) process_registry_updates(state) - # Proof of custody - process_reveal_deadlines(state) - process_challenge_deadlines(state) - process_slashings(state) # Sharding @@ -569,10 +563,8 @@ def process_epoch(state: BeaconState) -> None: process_randao_mixes_reset(state) process_historical_roots_update(state) process_participation_record_updates(state) - # Proof of custody - process_custody_final_updates(state) - # Update current_epoch_start_shard - state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1)) + + process_shard_epoch_increment(state) ``` #### Pending headers @@ -682,6 +674,10 @@ def reset_pending_headers(state: BeaconState) -> None: )) ``` -#### Custody game updates +#### Shard epoch increment -`process_reveal_deadlines`, `process_challenge_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./custody-game.md). 
+```python +def process_shard_epoch_increment(state: BeaconState) -> None: + # Update current_epoch_start_shard + state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1)) +``` From d4c057a0bdd38fd2313a6ea9d8b45dbfbbc91dc3 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 01:09:15 +0100 Subject: [PATCH 101/127] bls point type back to sharding spec --- specs/das/das-core.md | 1 - specs/sharding/beacon-chain.md | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/das/das-core.md b/specs/das/das-core.md index 71bb5d8ec..c6cbc09e3 100644 --- a/specs/das/das-core.md +++ b/specs/das/das-core.md @@ -32,7 +32,6 @@ We define the following Python custom types for type hinting and readability: | Name | SSZ equivalent | Description | | - | - | - | | `SampleIndex` | `uint64` | A sample index, corresponding to chunk of extended data | -| `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` | ## Configuration diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index b578570a2..fcb05d621 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -70,6 +70,7 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `Shard` | `uint64` | A shard number | | `BLSCommitment` | `bytes48` | A G1 curve point | +| `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` | ## Configuration From 4e4d0eb24ac4630810e0f32276eaab99f55bfc2a Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 01:12:46 +0100 Subject: [PATCH 102/127] update custody doc wording --- specs/custody/custody-game.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/specs/custody/custody-game.md b/specs/custody/custody-game.md index 95bdf4b7e..de22548b2 100644 --- a/specs/custody/custody-game.md +++ b/specs/custody/custody-game.md @@ -57,7 +57,8 @@ ## Introduction -This document details the beacon chain additions 
and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [Phase 0](../phase0/beacon-chain.md) specification. +This document details the beacon chain additions and changes of Ethereum 2.0 to support the shard data custody game, +building upon the [Sharding](../sharding/beacon-chain.md) specification. ## Constants @@ -115,7 +116,7 @@ This document details the beacon chain additions and changes in Phase 1 of Ether #### `Validator` ```python -class Validator(phase0.Validator): +class Validator(sharding.Validator): # next_custody_secret_to_reveal is initialised to the custody period # (of the particular validator) in which the validator is activated # = get_custody_period_for_validator(...) @@ -128,7 +129,7 @@ class Validator(phase0.Validator): #### `BeaconBlockBody` ```python -class BeaconBlockBody(phase0.BeaconBlockBody): +class BeaconBlockBody(sharding.BeaconBlockBody): # Custody game chunk_challenges: List[CustodyChunkChallenge, MAX_CUSTODY_CHUNK_CHALLENGES] chunk_challenge_responses: List[CustodyChunkResponse, MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES] @@ -140,7 +141,7 @@ class BeaconBlockBody(phase0.BeaconBlockBody): #### `BeaconState` ```python -class BeaconState(phase0.BeaconState): +class BeaconState(sharding.BeaconState): # Future derived secrets already exposed; contains the indices of the exposed validator # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH], From b627f708b9d3ffa9a6fe4476924711177d2104ef Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 01:12:55 +0100 Subject: [PATCH 103/127] fix body type --- specs/sharding/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index fcb05d621..a83101af0 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ 
-143,10 +143,10 @@ class AttestationData(Container): shard_header_root: Root ``` -### `BeaconBlock` +### `BeaconBlockBody` ```python -class BeaconBlock(phase0.BeaconBlock): +class BeaconBlockBody(phase0.BeaconBlockBody): # insert phase 0 fields shard_headers: List[SignedShardHeader, MAX_SHARD_HEADERS] ``` From 65dbf6a5c4d3197f36804007f283b3d9daced2e5 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 18 Mar 2021 01:26:03 +0100 Subject: [PATCH 104/127] toc --- specs/sharding/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index a83101af0..dfb04963b 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -19,7 +19,7 @@ - [Domain types](#domain-types) - [Updated containers](#updated-containers) - [`AttestationData`](#attestationdata) - - [`BeaconBlock`](#beaconblock) + - [`BeaconBlockBody`](#beaconblockbody) - [`BeaconState`](#beaconstate) - [New containers](#new-containers) - [`DataCommitment`](#datacommitment) From 112056fa8044e1fa6e4a005eec9802e2ae5bbd47 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 26 Mar 2021 22:38:52 +0100 Subject: [PATCH 105/127] add custom types to TOC --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index ce6df0dd9..55994188a 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -11,6 +11,7 @@ - [Introduction](#introduction) +- [Custom types](#custom-types) - [Constants](#constants) - [Transition](#transition) - [Execution](#execution) @@ -19,7 +20,6 @@ - [`BeaconBlockBody`](#beaconblockbody) - [`BeaconState`](#beaconstate) - [New containers](#new-containers) - - [`Transaction`](#transaction) - [`ApplicationPayload`](#applicationpayload) - [Helper functions](#helper-functions) - [Misc](#misc) From 14cb996613fbf2cadc33b9d2723fb43e76e00362 Mon Sep 17 00:00:00 2001 From: protolambda 
Date: Fri, 26 Mar 2021 22:53:00 +0100 Subject: [PATCH 106/127] split phase1 tests into proof-of-custody and sharding tests, drop old shard-transition specific tests --- tests/core/pyspec/eth2spec/test/context.py | 39 +-- .../test_process_shard_transition.py | 197 ------------ .../test/phase1/sanity/test_shard_blocks.py | 251 ---------------- .../fork_choice/test_on_shard_block.py | 280 ------------------ .../{phase1 => proof_of_custody}/__init__.py | 0 .../block_processing/__init__.py | 0 .../test_process_attestation.py | 0 .../test_process_chunk_challenge.py | 0 .../test_process_custody_key_reveal.py | 0 .../test_process_custody_slashing.py | 0 ...est_process_early_derived_secret_reveal.py | 0 .../epoch_processing/__init__.py | 0 .../test_process_challenge_deadlines.py | 0 .../test_process_custody_final_updates.py | 0 .../test_process_reveal_deadlines.py | 0 .../sanity/__init__.py | 0 .../sanity/test_blocks.py | 0 .../unittests => sharding}/__init__.py | 0 .../test/sharding/unittests/__init__.py | 0 .../unittests/test_get_start_shard.py | 0 20 files changed, 5 insertions(+), 762 deletions(-) delete mode 100644 tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_shard_transition.py delete mode 100644 tests/core/pyspec/eth2spec/test/phase1/sanity/test_shard_blocks.py delete mode 100644 tests/core/pyspec/eth2spec/test/phase1/unittests/fork_choice/test_on_shard_block.py rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/block_processing/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/block_processing/test_process_attestation.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/block_processing/test_process_chunk_challenge.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/block_processing/test_process_custody_key_reveal.py (100%) rename 
tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/block_processing/test_process_custody_slashing.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/block_processing/test_process_early_derived_secret_reveal.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/epoch_processing/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/epoch_processing/test_process_challenge_deadlines.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/epoch_processing/test_process_custody_final_updates.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/epoch_processing/test_process_reveal_deadlines.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/sanity/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1 => proof_of_custody}/sanity/test_blocks.py (100%) rename tests/core/pyspec/eth2spec/test/{phase1/unittests => sharding}/__init__.py (100%) create mode 100644 tests/core/pyspec/eth2spec/test/sharding/unittests/__init__.py rename tests/core/pyspec/eth2spec/test/{phase1 => sharding}/unittests/test_get_start_shard.py (100%) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index caf44e983..7e06576e1 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -1,7 +1,6 @@ import pytest from eth2spec.phase0 import spec as spec_phase0 -from eth2spec.phase1 import spec as spec_phase1 from eth2spec.altair import spec as spec_altair from eth2spec.utils import bls @@ -19,7 +18,6 @@ from importlib import reload def reload_specs(): reload(spec_phase0) - reload(spec_phase1) reload(spec_altair) @@ -29,10 +27,9 @@ SpecForkName = NewType("SpecForkName", str) ConfigName = NewType("ConfigName", str) PHASE0 = SpecForkName('phase0') -PHASE1 = SpecForkName('phase1') ALTAIR = SpecForkName('altair') -ALL_PHASES = (PHASE0, 
PHASE1, ALTAIR) +ALL_PHASES = (PHASE0, ALTAIR) # TODO add merge, sharding, proof_of_custody and das as phases. MAINNET = ConfigName('mainnet') MINIMAL = ConfigName('minimal') @@ -54,10 +51,6 @@ class SpecPhase0(Spec): ... -class SpecPhase1(Spec): - ... - - class SpecAltair(Spec): ... @@ -65,7 +58,6 @@ class SpecAltair(Spec): # add transfer, bridge, etc. as the spec evolves class SpecForks(TypedDict, total=False): PHASE0: SpecPhase0 - PHASE1: SpecPhase1 ALTAIR: SpecAltair @@ -78,11 +70,8 @@ def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Ca state = create_genesis_state(spec=p0, validator_balances=balances, activation_threshold=activation_threshold) - if spec.fork == PHASE1: - # TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper. - # Decide based on performance/consistency results later. - state = phases[PHASE1].upgrade_to_phase1(state) - elif spec.fork == ALTAIR: + # TODO: upgrade to merge spec, and later sharding. + if spec.fork == ALTAIR: state = phases[ALTAIR].upgrade_to_altair(state) return state @@ -341,24 +330,20 @@ def with_phases(phases, other_phases=None): if other_phases is not None: available_phases |= set(other_phases) - # TODO: test state is dependent on phase0 but is immediately transitioned to phase1. - # A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0 + # TODO: test state is dependent on phase0 but is immediately transitioned to later phases. + # A new state-creation helper for later phases may be in place, and then tests can run without phase0 available_phases.add(PHASE0) # Populate all phases for multi-phase tests phase_dir = {} if PHASE0 in available_phases: phase_dir[PHASE0] = spec_phase0 - if PHASE1 in available_phases: - phase_dir[PHASE1] = spec_phase1 if ALTAIR in available_phases: phase_dir[ALTAIR] = spec_altair # return is ignored whenever multiple phases are ran. 
If if PHASE0 in run_phases: ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw) - if PHASE1 in run_phases: - ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw) if ALTAIR in run_phases: ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw) return ret @@ -382,20 +367,6 @@ def with_configs(configs, reason=None): return decorator -def only_full_crosslink(fn): - def is_full_crosslink(spec, state): - epoch = spec.compute_epoch_at_slot(state.slot) - return spec.get_committee_count_per_slot(state, epoch) >= spec.get_active_shard_count(state) - - def wrapper(*args, spec: Spec, state: Any, **kw): - # TODO: update condition to "phase1+" if we have phase2 - if spec.fork == PHASE1 and not is_full_crosslink(spec, state): - dump_skipping_message("only for full crosslink") - return None - return fn(*args, spec=spec, state=state, **kw) - return wrapper - - def is_post_altair(spec): if spec.fork in [PHASE0, PHASE1]: # TODO: PHASE1 fork is temporarily parallel to ALTAIR. diff --git a/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_shard_transition.py b/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_shard_transition.py deleted file mode 100644 index 2ee2d34c2..000000000 --- a/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_shard_transition.py +++ /dev/null @@ -1,197 +0,0 @@ -from eth2spec.test.context import ( - PHASE0, - ALTAIR, - with_all_phases_except, - only_full_crosslink, - spec_state_test, -) -from eth2spec.test.helpers.attestations import ( - get_valid_attestation, - get_valid_on_time_attestation, - run_attestation_processing, -) -from eth2spec.test.helpers.shard_transitions import ( - run_shard_transitions_processing, -) -from eth2spec.test.helpers.shard_block import ( - build_shard_block, - get_shard_transitions, - get_sample_shard_block_body, - get_committee_index_of_shard, -) -from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot, next_slot - - -def 
get_initial_env(spec, state, target_len_offset_slot): - transition_to_valid_shard_slot(spec, state) - committee_index = spec.CommitteeIndex(0) - target_shard_slot = state.slot + target_len_offset_slot - 1 - shard = spec.compute_shard_from_committee_index(state, committee_index, target_shard_slot) - assert state.shard_states[shard].slot == state.slot - 1 - return state, shard, target_shard_slot - - -def get_attestations_and_shard_transitions(spec, state, shard_block_dict): - shard_transitions = get_shard_transitions(spec, state, shard_block_dict) - attestations = [ - get_valid_on_time_attestation( - spec, state, - index=get_committee_index_of_shard(spec, state, state.slot, shard), - shard_transition=shard_transition, - signed=True, - ) - for shard, shard_transition in enumerate(shard_transitions) - if shard_transition != spec.ShardTransition() - ] - return attestations, shard_transitions - - -def run_successful_crosslink_tests(spec, state, target_len_offset_slot): - state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot) - init_slot = state.slot - - # Create SignedShardBlock at init_slot - shard_block = build_shard_block( - spec, state, shard, - slot=init_slot, body=get_sample_shard_block_body(spec, is_max=True), signed=True - ) - - # Transition state to target shard slot - transition_to(spec, state, target_shard_slot) - - # Create a shard_transitions that would be included at beacon block `target_shard_slot + 1` - shard_block_dict = {shard: [shard_block]} - attestations, shard_transitions = get_attestations_and_shard_transitions(spec, state, shard_block_dict) - - next_slot(spec, state) - - for attestation in attestations: - _, _, _ = run_attestation_processing(spec, state, attestation) - - _, winning_roots = spec.get_shard_winning_roots(state, attestations) - assert len(winning_roots) == 1 - shard_transition = shard_transitions[shard] - assert winning_roots[0] == shard_transition.hash_tree_root() - - pre_gasprice = 
state.shard_states[shard].gasprice - pre_shard_states = state.shard_states.copy() - yield from run_shard_transitions_processing(spec, state, shard_transitions, attestations) - - for index, shard_state in enumerate(state.shard_states): - if index == shard: - assert shard_state != pre_shard_states[index] - assert shard_state == shard_transition.shard_states[len(shard_transition.shard_states) - 1] - assert shard_state.latest_block_root == shard_block.message.hash_tree_root() - if target_len_offset_slot == 1: - assert shard_state.gasprice > pre_gasprice - else: - assert shard_state == pre_shard_states[index] - - for pending_attestation in state.current_epoch_attestations: - assert bool(pending_attestation.crosslink_success) is True - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_basic_crosslinks(spec, state): - yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=1) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_multiple_offset_slots(spec, state): - yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=2) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_no_winning_root(spec, state): - state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot=1) - init_slot = state.slot - - # Create SignedShardBlock at init_slot - shard_block = build_shard_block( - spec, state, shard, - slot=init_slot, body=get_sample_shard_block_body(spec, is_max=True), signed=True - ) - - # Transition state to target shard slot - transition_to(spec, state, target_shard_slot) - - # Create a shard_transitions that would be included at beacon block `target_shard_slot + 1` - shard_transitions = get_shard_transitions(spec, state, {shard: [shard_block]}) - shard_transition = shard_transitions[shard] - committee_index = get_committee_index_of_shard(spec, state, state.slot, shard) - 
attestation = get_valid_attestation( - spec, state, - index=committee_index, - shard_transition=shard_transition, - # Decrease attested participants to 1/3 committee - filter_participant_set=lambda committee: set(list(committee)[:len(committee) // 3]), - signed=True, - on_time=True, - ) - - next_slot(spec, state) - - _, _, _ = run_attestation_processing(spec, state, attestation) - - _, winning_roots = spec.get_shard_winning_roots(state, [attestation]) - assert len(winning_roots) == 0 - - # No winning root, shard_transitions[shard] is empty - shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS - pre_shard_states = state.shard_states.copy() - yield from run_shard_transitions_processing(spec, state, shard_transitions, [attestation]) - - for pending_attestation in state.current_epoch_attestations: - assert bool(pending_attestation.crosslink_success) is False - - assert state.shard_states == pre_shard_states - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_wrong_shard_transition_root(spec, state): - state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot=1) - init_slot = state.slot - - # Create SignedShardBlock at init_slot - shard_block = build_shard_block( - spec, state, shard, - slot=init_slot, body=get_sample_shard_block_body(spec, is_max=True), signed=True - ) - - # Transition state to target shard slot - transition_to(spec, state, target_shard_slot) - - # Create a shard_transitions that would be included at beacon block `target_shard_slot + 1` - shard_transitions = get_shard_transitions(spec, state, {shard: [shard_block]}) - shard_transition = shard_transitions[shard] - wrong_shard_transition = shard_transition.copy() - wrong_shard_transition.shard_states[shard].gasprice = shard_transition.shard_states[shard].gasprice + 1 - committee_index = get_committee_index_of_shard(spec, state, state.slot, shard) - attestation = get_valid_attestation( - spec, state, - 
index=committee_index, - shard_transition=wrong_shard_transition, - signed=True, - on_time=True, - ) - attestations = [attestation] - - next_slot(spec, state) - - run_attestation_processing(spec, state, attestation) - - # Check if winning root != shard_transition.hash_tree_root() - _, winning_roots = spec.get_shard_winning_roots(state, attestations) - assert len(winning_roots) == 1 - shard_transition = shard_transitions[shard] - assert winning_roots[0] != shard_transition.hash_tree_root() - - yield from run_shard_transitions_processing(spec, state, shard_transitions, attestations, valid=False) diff --git a/tests/core/pyspec/eth2spec/test/phase1/sanity/test_shard_blocks.py b/tests/core/pyspec/eth2spec/test/phase1/sanity/test_shard_blocks.py deleted file mode 100644 index d27dacc7d..000000000 --- a/tests/core/pyspec/eth2spec/test/phase1/sanity/test_shard_blocks.py +++ /dev/null @@ -1,251 +0,0 @@ -from eth2spec.test.context import ( - PHASE0, - ALTAIR, - always_bls, - expect_assertion_error, - spec_state_test, - with_all_phases_except, - only_full_crosslink, -) -from eth2spec.test.helpers.shard_block import ( - build_shard_block, - sign_shard_block, -) -from eth2spec.test.helpers.state import next_slot, transition_to_valid_shard_slot, transition_to - - -def run_shard_blocks(spec, shard_state, signed_shard_block, beacon_parent_state, valid=True): - pre_shard_state = shard_state.copy() - - yield 'pre', pre_shard_state - yield 'signed_shard_block', signed_shard_block - yield 'beacon_parent_state', beacon_parent_state - - if not valid: - expect_assertion_error( - lambda: spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state) - ) - yield 'post', None - return - - spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state) - yield 'post', shard_state - - # Verify `process_shard_block` - block = signed_shard_block.message - - assert shard_state.slot == block.slot - - shard_block_length = len(block.body) - assert 
shard_state.gasprice == spec.compute_updated_gasprice(pre_shard_state.gasprice, shard_block_length) - if shard_block_length != 0: - shard_state.latest_block_root == block.hash_tree_root() - else: - shard_state.latest_block_root == pre_shard_state.latest_block_root - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@always_bls -@only_full_crosslink -def test_valid_shard_block(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, state, shard, slot=beacon_state.slot, signed=True) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state) - - -# -# verify_shard_block_message -# - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_invalid_shard_parent_root(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - signed_shard_block.message.shard_parent_root = b'\x12' * 32 - sign_shard_block(spec, beacon_state, shard, signed_shard_block) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_invalid_beacon_parent_root(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - signed_shard_block.message.beacon_parent_root = b'\x12' * 32 - sign_shard_block(spec, beacon_state, shard, signed_shard_block) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, 
valid=False) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_invalid_slot(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - signed_shard_block.message.slot = beacon_state.slot + 1 - proposer_index = spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard) - sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_invalid_proposer_index(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - active_validator_indices = spec.get_active_validator_indices(beacon_state, spec.get_current_epoch(beacon_state)) - proposer_index = ( - (spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard) + 1) - % len(active_validator_indices) - ) - signed_shard_block.message.proposer_index = proposer_index - sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@always_bls -@only_full_crosslink -def test_out_of_bound_offset(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - slot = ( - beacon_state.shard_states[shard].slot - + 
spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1] - + 1 # out-of-bound - ) - transition_to(spec, beacon_state, slot) - - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@always_bls -@only_full_crosslink -def test_invalid_offset(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - # 4 is not in `SHARD_BLOCK_OFFSETS` - shard = 0 - slot = beacon_state.shard_states[shard].slot + 4 - assert slot not in spec.SHARD_BLOCK_OFFSETS - transition_to(spec, beacon_state, slot) - - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@always_bls -@only_full_crosslink -def test_empty_block_body(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, body=b'', signed=True) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False) - - -# -# verify_shard_block_signature -# - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@always_bls -@only_full_crosslink -def test_invalid_signature(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=False) - - yield 
from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False) - - -# -# Other cases -# - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@always_bls -@only_full_crosslink -def test_max_offset(spec, state): - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - slot = beacon_state.shard_states[shard].slot + spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1] - transition_to(spec, beacon_state, slot) - - shard_state = beacon_state.shard_states[shard] - signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - - yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@always_bls -@only_full_crosslink -def test_pending_shard_parent_block(spec, state): - # Block N - beacon_state = state.copy() - transition_to_valid_shard_slot(spec, beacon_state) - shard = 0 - shard_state = beacon_state.shard_states[shard] - signed_shard_block_1 = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True) - _, _, _, _ = run_shard_blocks(spec, shard_state, signed_shard_block_1, beacon_state) - - # Block N+1 - next_slot(spec, beacon_state) - signed_shard_block_2 = build_shard_block( - spec, beacon_state, shard, - slot=beacon_state.slot, shard_parent_state=shard_state, signed=True - ) - - assert signed_shard_block_2.message.shard_parent_root == shard_state.latest_block_root - assert signed_shard_block_2.message.slot == signed_shard_block_1.message.slot + 1 - yield from run_shard_blocks(spec, shard_state, signed_shard_block_2, beacon_state) diff --git a/tests/core/pyspec/eth2spec/test/phase1/unittests/fork_choice/test_on_shard_block.py b/tests/core/pyspec/eth2spec/test/phase1/unittests/fork_choice/test_on_shard_block.py deleted file mode 100644 index 225800303..000000000 --- 
a/tests/core/pyspec/eth2spec/test/phase1/unittests/fork_choice/test_on_shard_block.py +++ /dev/null @@ -1,280 +0,0 @@ -from eth2spec.utils.ssz.ssz_impl import hash_tree_root - -from eth2spec.test.context import ( - PHASE0, - ALTAIR, - spec_state_test, - with_all_phases_except, - never_bls, - only_full_crosslink, -) -from eth2spec.test.helpers.attestations import get_valid_on_time_attestation -from eth2spec.test.helpers.shard_block import ( - build_shard_block, - get_shard_transitions, - get_committee_index_of_shard, -) -from eth2spec.test.helpers.fork_choice import add_block_to_store, get_anchor_root, get_genesis_forkchoice_store -from eth2spec.test.helpers.state import state_transition_and_sign_block -from eth2spec.test.helpers.block import build_empty_block - - -def run_on_shard_block(spec, store, signed_block, valid=True): - shard = signed_block.message.shard - if not valid: - try: - spec.on_shard_block(store, signed_block) - except AssertionError: - return - else: - assert False - - spec.on_shard_block(store, signed_block) - shard_store = store.shard_stores[shard] - assert shard_store.signed_blocks[hash_tree_root(signed_block.message)] == signed_block - - -def initialize_store(spec, state, shards): - store = get_genesis_forkchoice_store(spec, state) - anchor_root = get_anchor_root(spec, state) - assert spec.get_head(store) == anchor_root - - for shard in shards: - shard_head_root = spec.get_shard_head(store, shard) - assert shard_head_root == state.shard_states[shard].latest_block_root - shard_store = store.shard_stores[shard] - assert shard_store.block_states[shard_head_root].slot == 0 - assert shard_store.block_states[shard_head_root] == state.shard_states[shard] - - return store - - -def create_and_apply_shard_block(spec, store, shard, beacon_parent_state, shard_blocks_buffer): - body = b'\x56' * 4 - shard_head_root = spec.get_shard_head(store, shard) - shard_store = store.shard_stores[shard] - shard_parent_state = shard_store.block_states[shard_head_root] - 
assert shard_parent_state.slot != beacon_parent_state.slot - shard_block = build_shard_block( - spec, beacon_parent_state, shard, - shard_parent_state=shard_parent_state, slot=beacon_parent_state.slot, body=body, signed=True - ) - shard_blocks_buffer.append(shard_block) - run_on_shard_block(spec, store, shard_block) - assert spec.get_shard_head(store, shard) == shard_block.message.hash_tree_root() - - -def check_pending_shard_blocks(spec, store, shard, shard_blocks_buffer): - pending_shard_blocks = spec.get_pending_shard_blocks(store, shard) - assert pending_shard_blocks == shard_blocks_buffer - - -def is_in_offset_sets(spec, beacon_head_state, shard): - offset_slots = spec.compute_offset_slots( - beacon_head_state.shard_states[shard].slot, beacon_head_state.slot + 1 - ) - return beacon_head_state.slot in offset_slots - - -def create_attestation_for_shard_blocks(spec, beacon_parent_state, shard, committee_index, blocks, - filter_participant_set=None): - shard_transition = spec.get_shard_transition(beacon_parent_state, shard, blocks) - attestation = get_valid_on_time_attestation( - spec, - beacon_parent_state, - index=committee_index, - shard_transition=shard_transition, - signed=True, - ) - return attestation - - -def create_beacon_block_with_shard_transition( - spec, state, store, shard, shard_blocks_buffer, is_checking_pending_shard_blocks=True): - beacon_block = build_empty_block(spec, state, slot=state.slot + 1) - committee_index = get_committee_index_of_shard(spec, state, state.slot, shard) - has_shard_committee = committee_index is not None # has committee of `shard` at this slot - - beacon_block = build_empty_block(spec, state, slot=state.slot + 1) - - # If next slot has committee of `shard`, add `shard_transtion` to the proposing beacon block - if has_shard_committee and len(shard_blocks_buffer) > 0: - # Sanity check `get_pending_shard_blocks` - # Assert that the pending shard blocks set in the store equal to shard_blocks_buffer - if 
is_checking_pending_shard_blocks: - check_pending_shard_blocks(spec, store, shard, shard_blocks_buffer) - # Use temporary next state to get ShardTransition of shard block - shard_transitions = get_shard_transitions(spec, state, shard_block_dict={shard: shard_blocks_buffer}) - shard_transition = shard_transitions[shard] - attestation = get_valid_on_time_attestation( - spec, - state, - index=committee_index, - shard_transition=shard_transition, - signed=True, - ) - assert attestation.data.shard == shard - beacon_block.body.attestations = [attestation] - beacon_block.body.shard_transitions = shard_transitions - - # Clear buffer - shard_blocks_buffer.clear() - - return beacon_block - - -def apply_all_attestation_to_store(spec, store, attestations): - for attestation in attestations: - spec.on_attestation(store, attestation) - - -def apply_beacon_block_to_store(spec, state, store, beacon_block): - signed_beacon_block = state_transition_and_sign_block(spec, state, beacon_block) # transition! - store.time = store.time + spec.SECONDS_PER_SLOT - add_block_to_store(spec, store, signed_beacon_block) - apply_all_attestation_to_store(spec, store, signed_beacon_block.message.body.attestations) - - -def create_and_apply_beacon_and_shard_blocks(spec, state, store, shard, shard_blocks_buffer, - is_checking_pending_shard_blocks=True): - beacon_block = create_beacon_block_with_shard_transition( - spec, state, store, shard, shard_blocks_buffer, - is_checking_pending_shard_blocks=is_checking_pending_shard_blocks - ) - apply_beacon_block_to_store(spec, state, store, beacon_block) - - # On shard block at the transitioned `state.slot` - if is_in_offset_sets(spec, state, shard): - # The created shard block would be appended to `shard_blocks_buffer` - create_and_apply_shard_block(spec, store, shard, state, shard_blocks_buffer) - - has_shard_committee = get_committee_index_of_shard(spec, state, state.slot, shard) is not None - return has_shard_committee - - -@with_all_phases_except([PHASE0, 
ALTAIR]) -@spec_state_test -@never_bls # Set to never_bls for testing `check_pending_shard_blocks` -def test_basic(spec, state): - spec.PHASE_1_GENESIS_SLOT = 0 # NOTE: mock genesis slot here - state = spec.upgrade_to_phase1(state) - shard = spec.Shard(1) - - # Initialization - store = initialize_store(spec, state, [shard]) - - # For mainnet config, it's possible that only one committee of `shard` per epoch. - # we set this counter to test more rounds. - shard_committee_counter = 2 - shard_blocks_buffer = [] # the accumulated shard blocks that haven't been crosslinked yet - while shard_committee_counter > 0: - has_shard_committee = create_and_apply_beacon_and_shard_blocks(spec, state, store, shard, shard_blocks_buffer) - if has_shard_committee: - shard_committee_counter -= 1 - - -def create_simple_fork(spec, state, store, shard): - # Beacon block - beacon_block = create_beacon_block_with_shard_transition(spec, state, store, shard, []) - apply_beacon_block_to_store(spec, state, store, beacon_block) - - beacon_head_root = spec.get_head(store) - assert beacon_head_root == beacon_block.hash_tree_root() - beacon_parent_state = store.block_states[beacon_head_root] - shard_store = store.shard_stores[shard] - shard_parent_state = shard_store.block_states[spec.get_shard_head(store, shard)] - - # Shard block A - body = b'\x56' * 4 - forking_block_child = build_shard_block( - spec, beacon_parent_state, shard, - shard_parent_state=shard_parent_state, slot=beacon_parent_state.slot, body=body, signed=True - ) - run_on_shard_block(spec, store, forking_block_child) - - # Shard block B - body = b'\x78' * 4 # different body - shard_block_b = build_shard_block( - spec, beacon_parent_state, shard, - shard_parent_state=shard_parent_state, slot=beacon_parent_state.slot, body=body, signed=True - ) - run_on_shard_block(spec, store, shard_block_b) - - # Set forking_block - current_head = spec.get_shard_head(store, shard) - if current_head == forking_block_child.message.hash_tree_root(): - 
head_block = forking_block_child - forking_block = shard_block_b - else: - assert current_head == shard_block_b.message.hash_tree_root() - head_block = shard_block_b - forking_block = forking_block_child - - return head_block, forking_block - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_shard_simple_fork(spec, state): - spec.PHASE_1_GENESIS_SLOT = 0 # NOTE: mock genesis slot here - state = spec.upgrade_to_phase1(state) - shard = spec.Shard(1) - - # Initialization - store = initialize_store(spec, state, [shard]) - - # Create fork - _, forking_block = create_simple_fork(spec, state, store, shard) - - # Vote for forking_block - state = store.block_states[spec.get_head(store)].copy() - beacon_block = create_beacon_block_with_shard_transition(spec, state, store, shard, [forking_block], - is_checking_pending_shard_blocks=False) - store.time = store.time + spec.SECONDS_PER_SLOT - apply_all_attestation_to_store(spec, store, beacon_block.body.attestations) - - # Head block has been changed - assert spec.get_shard_head(store, shard) == forking_block.message.hash_tree_root() - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_shard_latest_messages_for_different_shards(spec, state): - spec.PHASE_1_GENESIS_SLOT = 0 # NOTE: mock genesis slot here - state = spec.upgrade_to_phase1(state) - shard_0 = spec.Shard(0) - shard_1 = spec.Shard(1) - - # Initialization - store = initialize_store(spec, state, [shard_0, shard_1]) - - # Shard 0 ---------------------------------- - # Create fork on shard 0 - _, forking_block = create_simple_fork(spec, state, store, shard_0) - - # Vote for forking_block on shard 0 - state = store.block_states[spec.get_head(store)].copy() - beacon_block = create_beacon_block_with_shard_transition(spec, state, store, shard_0, [forking_block], - is_checking_pending_shard_blocks=False) - store.time = store.time + spec.SECONDS_PER_SLOT - apply_all_attestation_to_store(spec, 
store, beacon_block.body.attestations) - - # Head block of shard 0 has been changed due to the shard latest messages - assert spec.get_shard_head(store, shard_0) == forking_block.message.hash_tree_root() - - # Shard 1 ---------------------------------- - # Run shard 1 after 1~2 epochs - shard_committee_counter = 2 - shard_blocks_buffer = [] # the accumulated shard blocks that haven't been crosslinked yet - while shard_committee_counter > 0: - has_shard_committee = create_and_apply_beacon_and_shard_blocks( - spec, state, store, shard_1, shard_blocks_buffer - ) - if has_shard_committee: - shard_committee_counter -= 1 - - # Go back to see shard 0 ---------------------------------- - # The head block of shard 0 should be unchanged. - assert spec.get_shard_head(store, shard_0) == forking_block.message.hash_tree_root() diff --git a/tests/core/pyspec/eth2spec/test/phase1/__init__.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/__init__.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/block_processing/__init__.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/block_processing/__init__.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_attestation.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_chunk_challenge.py 
b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_chunk_challenge.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_custody_key_reveal.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_custody_key_reveal.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_custody_slashing.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_custody_slashing.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_early_derived_secret_reveal.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/block_processing/test_process_early_derived_secret_reveal.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/epoch_processing/__init__.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/epoch_processing/__init__.py rename to 
tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/epoch_processing/test_process_challenge_deadlines.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/epoch_processing/test_process_challenge_deadlines.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/epoch_processing/test_process_custody_final_updates.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/epoch_processing/test_process_custody_final_updates.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/epoch_processing/test_process_reveal_deadlines.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/epoch_processing/test_process_reveal_deadlines.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/sanity/__init__.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/sanity/__init__.py rename to tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/sanity/test_blocks.py rename to 
tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py diff --git a/tests/core/pyspec/eth2spec/test/phase1/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/sharding/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/unittests/__init__.py rename to tests/core/pyspec/eth2spec/test/sharding/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/sharding/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/sharding/unittests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/core/pyspec/eth2spec/test/phase1/unittests/test_get_start_shard.py b/tests/core/pyspec/eth2spec/test/sharding/unittests/test_get_start_shard.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/phase1/unittests/test_get_start_shard.py rename to tests/core/pyspec/eth2spec/test/sharding/unittests/test_get_start_shard.py From 906cde061090ac117dccae4b9b51f41f596780ab Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 26 Mar 2021 23:07:48 +0100 Subject: [PATCH 107/127] preserve proof of custody tests --- tests/core/pyspec/eth2spec/test/context.py | 8 +- .../test_process_attestation.py | 9 +- .../test_process_chunk_challenge.py | 27 +++-- .../test_process_custody_key_reveal.py | 15 ++- .../test_process_custody_slashing.py | 16 ++- ...est_process_early_derived_secret_reveal.py | 21 ++-- .../test_process_challenge_deadlines.py | 9 +- .../test_process_custody_final_updates.py | 19 ++-- .../test_process_reveal_deadlines.py | 13 +-- .../proof_of_custody/sanity/test_blocks.py | 98 ++----------------- .../unittests/test_get_start_shard.py | 15 ++- 11 files changed, 82 insertions(+), 168 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 7e06576e1..34a7016f5 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -29,7 +29,13 @@ ConfigName = NewType("ConfigName", str) PHASE0 = 
SpecForkName('phase0') ALTAIR = SpecForkName('altair') -ALL_PHASES = (PHASE0, ALTAIR) # TODO add merge, sharding, proof_of_custody and das as phases. +# Experimental phases (not included in default "ALL_PHASES"): +MERGE = SpecForkName('merge') +SHARDING = SpecForkName('sharding') +PROOF_OF_CUSTODY = SpecForkName('proof_of_custody') +DAS = SpecForkName('das') + +ALL_PHASES = (PHASE0, ALTAIR) MAINNET = ConfigName('mainnet') MINIMAL = ConfigName('minimal') diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py index 2f641eacb..1603d8f88 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py @@ -1,7 +1,6 @@ from eth2spec.test.context import ( - PHASE0, - ALTAIR, - with_all_phases_except, + PROOF_OF_CUSTODY, + with_phases, spec_state_test, always_bls, ) @@ -13,7 +12,7 @@ from eth2spec.test.helpers.attestations import ( ) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_on_time_success(spec, state): @@ -24,7 +23,7 @@ def test_on_time_success(spec, state): yield from run_attestation_processing(spec, state, attestation) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_late_success(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py index 249074999..61e567504 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py @@ -8,13 
+8,12 @@ from eth2spec.test.helpers.attestations import ( ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( - PHASE0, - ALTAIR, + PROOF_OF_CUSTODY, MINIMAL, expect_assertion_error, disable_process_reveal_deadlines, spec_state_test, - with_all_phases_except, + with_phases, with_configs, ) from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing @@ -69,7 +68,7 @@ def run_custody_chunk_response_processing(spec, state, custody_response, valid=T yield 'post', state -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @with_configs([MINIMAL], reason="too slow") @disable_process_reveal_deadlines @@ -93,7 +92,7 @@ def test_challenge_appended(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -119,7 +118,7 @@ def test_challenge_empty_element_replaced(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -145,7 +144,7 @@ def test_duplicate_challenge(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge, valid=False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -173,7 +172,7 @@ def test_second_challenge(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge1) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -198,7 +197,7 
@@ def test_multiple_epochs_custody(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -223,7 +222,7 @@ def test_many_epochs_custody(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -244,7 +243,7 @@ def test_off_chain_attestation(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -276,7 +275,7 @@ def test_custody_response(spec, state): yield from run_custody_chunk_response_processing(spec, state, custody_response) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -307,7 +306,7 @@ def test_custody_response_chunk_index_2(spec, state): yield from run_custody_chunk_response_processing(spec, state, custody_response) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -339,7 +338,7 @@ def test_custody_response_multiple_epochs(spec, state): yield from run_custody_chunk_response_processing(spec, state, custody_response) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py 
b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py index 2ea70703a..64e207062 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py @@ -1,8 +1,7 @@ from eth2spec.test.helpers.custody import get_valid_custody_key_reveal from eth2spec.test.context import ( - PHASE0, - ALTAIR, - with_all_phases_except, + PROOF_OF_CUSTODY, + with_phases, spec_state_test, expect_assertion_error, always_bls, @@ -40,7 +39,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru yield 'post', state -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_success(spec, state): @@ -50,7 +49,7 @@ def test_success(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_reveal_too_early(spec, state): @@ -59,7 +58,7 @@ def test_reveal_too_early(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_wrong_period(spec, state): @@ -68,7 +67,7 @@ def test_wrong_period(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_late_reveal(spec, state): @@ -78,7 +77,7 @@ def test_late_reveal(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_double_reveal(spec, state): diff --git 
a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py index 732d7da05..1783a5bec 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py @@ -9,10 +9,8 @@ from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.ssz.ssz_typing import ByteList from eth2spec.test.helpers.state import get_balance, transition_to from eth2spec.test.context import ( - PHASE0, - MINIMAL, - ALTAIR, - with_all_phases_except, + PROOF_OF_CUSTODY, + with_phases, spec_state_test, expect_assertion_error, disable_process_reveal_deadlines, @@ -113,7 +111,7 @@ def run_standard_custody_slashing_test(spec, yield from run_custody_slashing_processing(spec, state, slashing, valid=valid, correct=correct) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -121,7 +119,7 @@ def test_custody_slashing(spec, state): yield from run_standard_custody_slashing_test(spec, state) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -129,7 +127,7 @@ def test_incorrect_custody_slashing(spec, state): yield from run_standard_custody_slashing_test(spec, state, correct=False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -137,7 +135,7 @@ def test_multiple_epochs_custody(spec, state): yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 3) -@with_all_phases_except([PHASE0, ALTAIR]) 
+@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -145,7 +143,7 @@ def test_many_epochs_custody(spec, state): yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 5) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py index 12cdfdff0..fa8b045ac 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py @@ -1,9 +1,8 @@ from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal from eth2spec.test.helpers.state import next_epoch_via_block, get_balance from eth2spec.test.context import ( - PHASE0, - ALTAIR, - with_all_phases_except, + PROOF_OF_CUSTODY, + with_phases, spec_state_test, expect_assertion_error, always_bls, @@ -42,7 +41,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v yield 'post', state -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_success(spec, state): @@ -51,7 +50,7 @@ def test_success(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @never_bls def test_reveal_from_current_epoch(spec, state): @@ -60,7 +59,7 @@ def test_reveal_from_current_epoch(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, 
randao_key_reveal, False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @never_bls def test_reveal_from_past_epoch(spec, state): @@ -70,7 +69,7 @@ def test_reveal_from_past_epoch(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_reveal_with_custody_padding(spec, state): @@ -82,7 +81,7 @@ def test_reveal_with_custody_padding(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @always_bls def test_reveal_with_custody_padding_minus_one(spec, state): @@ -94,7 +93,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @never_bls def test_double_reveal(spec, state): @@ -115,7 +114,7 @@ def test_double_reveal(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @never_bls def test_revealer_is_slashed(spec, state): @@ -125,7 +124,7 @@ def test_revealer_is_slashed(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @never_bls def test_far_future_epoch(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py index be058bb4b..516a761d1 100644 --- 
a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py @@ -7,17 +7,16 @@ from eth2spec.test.helpers.attestations import ( ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( - PHASE0, - ALTAIR, + PROOF_OF_CUSTODY, MINIMAL, spec_state_test, - with_all_phases_except, + with_phases, with_configs, ) from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with -from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import ( +from eth2spec.test.proof_of_custody.block_processing.test_process_chunk_challenge import ( run_chunk_challenge_processing, ) @@ -26,7 +25,7 @@ def run_process_challenge_deadlines(spec, state): yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines') -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @with_configs([MINIMAL], reason="too slow") def test_validator_slashed_after_chunk_challenge(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py index f2d9acc9a..4522c121a 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py @@ -1,6 +1,5 @@ from eth2spec.test.context import ( - PHASE0, - ALTAIR, + PROOF_OF_CUSTODY, ) from eth2spec.test.helpers.custody import ( get_valid_chunk_challenge, @@ -13,24 +12,26 @@ from eth2spec.test.helpers.attestations import ( ) from 
eth2spec.test.helpers.state import next_epoch_via_block, transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( - with_all_phases_except, + with_phases, spec_state_test, ) from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with -from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import ( +from eth2spec.test.proof_of_custody.block_processing.test_process_chunk_challenge import ( run_chunk_challenge_processing, run_custody_chunk_response_processing, ) -from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing +from eth2spec.test.proof_of_custody.block_processing.test_process_custody_key_reveal import ( + run_custody_key_reveal_processing, +) def run_process_custody_final_updates(spec, state): yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates') -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test def test_validator_withdrawal_delay(spec, state): transition_to_valid_shard_slot(spec, state) @@ -43,7 +44,7 @@ def test_validator_withdrawal_delay(spec, state): assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test def test_validator_withdrawal_reenable_after_custody_reveal(spec, state): transition_to_valid_shard_slot(spec, state) @@ -68,7 +69,7 @@ def test_validator_withdrawal_reenable_after_custody_reveal(spec, state): assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state): transition_to_valid_shard_slot(spec, state) @@ -117,7 +118,7 @@ def 
test_validator_withdrawal_suspend_after_chunk_challenge(spec, state): assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state): transition_to_valid_shard_slot(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py index 7b2094aea..43078cd45 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py @@ -3,22 +3,23 @@ from eth2spec.test.helpers.custody import ( ) from eth2spec.test.helpers.state import transition_to from eth2spec.test.context import ( - PHASE0, - ALTAIR, + PROOF_OF_CUSTODY, MINIMAL, - with_all_phases_except, + with_phases, with_configs, spec_state_test, ) from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with -from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing +from eth2spec.test.proof_of_custody.block_processing.test_process_custody_key_reveal import ( + run_custody_key_reveal_processing, +) def run_process_challenge_deadlines(spec, state): yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines') -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @with_configs([MINIMAL], reason="too slow") def test_validator_slashed_after_reveal_deadline(spec, state): @@ -38,7 +39,7 @@ def test_validator_slashed_after_reveal_deadline(spec, state): assert state.validators[0].slashed == 1 -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test 
@with_configs([MINIMAL], reason="too slow") def test_validator_not_slashed_after_reveal(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py index 1f17fa911..4dae918ef 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py @@ -1,10 +1,9 @@ from typing import Dict, Sequence from eth2spec.test.context import ( - PHASE0, - ALTAIR, + PROOF_OF_CUSTODY, MINIMAL, - with_all_phases_except, + with_phases, spec_state_test, only_full_crosslink, with_configs, @@ -43,97 +42,12 @@ def run_beacon_block(spec, state, block, valid=True): yield 'post', state -# -# Beacon block with non-empty shard transitions -# - - -def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard, valid=True): - transition_to(spec, state, state.slot + target_len_offset_slot) - - body = get_sample_shard_block_body(spec, is_max=True) - shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True) - shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} - - shard_transitions = get_shard_transitions(spec, state, shard_block_dict) - attestations = [ - get_valid_on_time_attestation( - spec, - state, - index=committee_index, - shard_transition=shard_transitions[shard], - signed=True, - ) - for shard in shard_block_dict.keys() - ] - - beacon_block = build_empty_block(spec, state, slot=state.slot + 1) - beacon_block.body.attestations = attestations - beacon_block.body.shard_transitions = shard_transitions - - pre_gasprice = state.shard_states[shard].gasprice - pre_shard_states = state.shard_states.copy() - yield 'pre', state.copy() - - if not valid: - state_transition_and_sign_block(spec, state, beacon_block, expect_fail=True) - yield 'block', beacon_block - yield 'post', None - return - 
- signed_beacon_block = state_transition_and_sign_block(spec, state, beacon_block) - yield 'block', signed_beacon_block - yield 'post', state - - for shard in range(spec.get_active_shard_count(state)): - post_shard_state = state.shard_states[shard] - if shard in shard_block_dict: - # Shard state has been changed to state_transition result - assert post_shard_state == shard_transitions[shard].shard_states[ - len(shard_transitions[shard].shard_states) - 1 - ] - assert post_shard_state.slot == state.slot - 1 - if len((shard_block_dict[shard])) == 0: - # `latest_block_root` is the same - assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root - if target_len_offset_slot == 1 and len(shard_block_dict[shard]) > 0: - assert post_shard_state.gasprice > pre_gasprice - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_process_beacon_block_with_normal_shard_transition(spec, state): - transition_to_valid_shard_slot(spec, state) - - target_len_offset_slot = 1 - committee_index = spec.CommitteeIndex(0) - shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1) - assert state.shard_states[shard].slot == state.slot - 1 - - yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard) - - -@with_all_phases_except([PHASE0, ALTAIR]) -@spec_state_test -@only_full_crosslink -def test_process_beacon_block_with_empty_proposal_transition(spec, state): - transition_to_valid_shard_slot(spec, state) - - target_len_offset_slot = 1 - committee_index = spec.CommitteeIndex(0) - shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1) - assert state.shard_states[shard].slot == state.slot - 1 - - yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard) - - # # Beacon block with custody operations # 
-@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @only_full_crosslink def test_with_shard_transition_with_custody_challenge_and_response(spec, state): @@ -167,7 +81,7 @@ def test_with_shard_transition_with_custody_challenge_and_response(spec, state): yield from run_beacon_block(spec, state, block) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @with_configs([MINIMAL]) def test_custody_key_reveal(spec, state): @@ -181,7 +95,7 @@ def test_custody_key_reveal(spec, state): yield from run_beacon_block(spec, state, block) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test def test_early_derived_secret_reveal(spec, state): transition_to_valid_shard_slot(spec, state) @@ -192,7 +106,7 @@ def test_early_derived_secret_reveal(spec, state): yield from run_beacon_block(spec, state, block) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([PROOF_OF_CUSTODY]) @spec_state_test @only_full_crosslink def test_custody_slashing(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/sharding/unittests/test_get_start_shard.py b/tests/core/pyspec/eth2spec/test/sharding/unittests/test_get_start_shard.py index 646dc6eb7..41ef754b3 100644 --- a/tests/core/pyspec/eth2spec/test/sharding/unittests/test_get_start_shard.py +++ b/tests/core/pyspec/eth2spec/test/sharding/unittests/test_get_start_shard.py @@ -1,13 +1,12 @@ from eth2spec.test.context import ( - PHASE0, - ALTAIR, - with_all_phases_except, + SHARDING, + with_phases, spec_state_test, ) from eth2spec.test.helpers.state import next_epoch -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([SHARDING]) @spec_state_test def test_get_committee_count_delta(spec, state): assert spec.get_committee_count_delta(state, 0, 0) == 0 @@ -24,7 +23,7 @@ def test_get_committee_count_delta(spec, state): ) -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([SHARDING]) @spec_state_test def 
test_get_start_shard_current_epoch_start(spec, state): assert state.current_epoch_start_shard == 0 @@ -40,7 +39,7 @@ def test_get_start_shard_current_epoch_start(spec, state): assert start_shard == state.current_epoch_start_shard -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([SHARDING]) @spec_state_test def test_get_start_shard_next_slot(spec, state): next_epoch(spec, state) @@ -58,7 +57,7 @@ def test_get_start_shard_next_slot(spec, state): assert start_shard == expected_start_shard -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([SHARDING]) @spec_state_test def test_get_start_shard_previous_slot(spec, state): next_epoch(spec, state) @@ -77,7 +76,7 @@ def test_get_start_shard_previous_slot(spec, state): assert start_shard == expected_start_shard -@with_all_phases_except([PHASE0, ALTAIR]) +@with_phases([SHARDING]) @spec_state_test def test_get_start_shard_far_past_epoch(spec, state): initial_epoch = spec.get_current_epoch(state) From a6c01f82121ea6942c84bffb77923a543cb1452e Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 26 Mar 2021 23:21:36 +0100 Subject: [PATCH 108/127] no phase1 in tests, per-feature testing now, executable specs will be re-enabled per feature --- Makefile | 4 ++-- tests/core/pyspec/eth2spec/test/context.py | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index d4e7c1258..1404568da 100644 --- a/Makefile +++ b/Makefile @@ -92,11 +92,11 @@ install_test: test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec find_test: pyspec . 
venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec citest: pyspec mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \ diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 34a7016f5..dcc8906b8 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -61,7 +61,6 @@ class SpecAltair(Spec): ... -# add transfer, bridge, etc. as the spec evolves class SpecForks(TypedDict, total=False): PHASE0: SpecPhase0 ALTAIR: SpecAltair @@ -352,6 +351,9 @@ def with_phases(phases, other_phases=None): ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw) if ALTAIR in run_phases: ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw) + + # TODO: merge, sharding, proof_of_custody and das are not executable yet. + # Tests that specify these features will not run, and get ignored for these specific phases. return ret return wrapper return decorator @@ -374,8 +376,8 @@ def with_configs(configs, reason=None): def is_post_altair(spec): - if spec.fork in [PHASE0, PHASE1]: - # TODO: PHASE1 fork is temporarily parallel to ALTAIR. - # Will make PHASE1 fork inherit ALTAIR later. + # TODO: everything runs in parallel to Altair. + # After features are rebased on the Altair fork, this can be reduced to just PHASE0. 
+ if spec.fork in [PHASE0, MERGE, SHARDING, PROOF_OF_CUSTODY, DAS]: return False return True From 0c94be420405380f104347e643500f4613ab8f9c Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 27 Mar 2021 00:08:50 +0100 Subject: [PATCH 109/127] clean up more phase1 test references --- Makefile | 3 +- .../test_process_sync_committee.py | 18 +++---- .../test_process_sync_committee_updates.py | 4 +- .../test/altair/sanity/test_blocks.py | 16 +++---- tests/core/pyspec/eth2spec/test/context.py | 7 ++- .../eth2spec/test/helpers/attestations.py | 47 ++++--------------- .../eth2spec/test/helpers/block_processing.py | 10 ++-- .../eth2spec/test/helpers/epoch_processing.py | 15 ++++-- .../test/helpers/shard_transitions.py | 37 --------------- .../test_process_attestation.py | 11 ++--- .../test_process_rewards_and_penalties.py | 4 +- .../test/phase0/rewards/test_basic.py | 22 ++++----- .../eth2spec/test/phase0/rewards/test_leak.py | 12 ++--- .../test/phase0/sanity/test_blocks.py | 20 +++----- .../fork_choice/test_on_attestation.py | 13 +---- .../test_process_custody_slashing.py | 1 + .../proof_of_custody/sanity/test_blocks.py | 3 -- 17 files changed, 84 insertions(+), 159 deletions(-) delete mode 100644 tests/core/pyspec/eth2spec/test/helpers/shard_transitions.py diff --git a/Makefile b/Makefile index 1404568da..b22e67c8f 100644 --- a/Makefile +++ b/Makefile @@ -116,10 +116,11 @@ check_toc: $(MARKDOWN_FILES:=.toc) codespell: codespell . --skip ./.git -I .codespell-whitelist +# TODO: add future merge, sharding, etc. packages to linting. lint: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \ - && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.phase1 -p eth2spec.altair + && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair lint_generators: pyspec . 
venv/bin/activate; cd $(TEST_GENERATORS_DIR); \ diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py index 958953176..7c9ba1358 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py @@ -12,7 +12,7 @@ from eth2spec.test.helpers.sync_committee import ( compute_aggregate_sync_committee_signature, ) from eth2spec.test.context import ( - PHASE0, PHASE1, + PHASE0, MAINNET, MINIMAL, expect_assertion_error, with_all_phases_except, @@ -60,7 +60,7 @@ def get_committee_indices(spec, state, duplicates=False): state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index]) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_invalid_signature_missing_participant(spec, state): @@ -82,7 +82,7 @@ def test_invalid_signature_missing_participant(spec, state): yield from run_sync_committee_processing(spec, state, block, expect_exception=True) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_invalid_signature_extra_participant(spec, state): @@ -191,7 +191,7 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits): ) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @with_configs([MINIMAL], reason="to create nonduplicate committee") @spec_state_test def test_sync_committee_rewards_nonduplicate_committee(spec, state): @@ -207,7 +207,7 @@ def test_sync_committee_rewards_nonduplicate_committee(spec, state): yield from run_successful_sync_committee_test(spec, state, committee, committee_bits) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @with_configs([MAINNET], reason="to create duplicate 
committee") @spec_state_test def test_sync_committee_rewards_duplicate_committee(spec, state): @@ -223,7 +223,7 @@ def test_sync_committee_rewards_duplicate_committee(spec, state): yield from run_successful_sync_committee_test(spec, state, committee, committee_bits) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_sync_committee_rewards_not_full_participants(spec, state): @@ -234,7 +234,7 @@ def test_sync_committee_rewards_not_full_participants(spec, state): yield from run_successful_sync_committee_test(spec, state, committee, committee_bits) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls def test_invalid_signature_past_block(spec, state): @@ -273,7 +273,7 @@ def test_invalid_signature_past_block(spec, state): yield from run_sync_committee_processing(spec, state, invalid_block, expect_exception=True) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @with_configs([MINIMAL], reason="to produce different committee sets") @spec_state_test @always_bls @@ -310,7 +310,7 @@ def test_invalid_signature_previous_committee(spec, state): yield from run_sync_committee_processing(spec, state, block, expect_exception=True) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test @always_bls @with_configs([MINIMAL], reason="too slow") diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py index 0cf1e7a49..4c5ac0633 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_sync_committee_updates.py @@ -1,5 +1,5 @@ from eth2spec.test.context import ( - PHASE0, PHASE1, + PHASE0, MINIMAL, spec_state_test, 
with_all_phases_except, @@ -11,7 +11,7 @@ from eth2spec.test.helpers.epoch_processing import ( ) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test @with_configs([MINIMAL], reason="too slow") def test_sync_committees_progress(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py index 81e0df713..9f28926d4 100644 --- a/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/altair/sanity/test_blocks.py @@ -12,7 +12,7 @@ from eth2spec.test.helpers.sync_committee import ( compute_aggregate_sync_committee_signature, ) from eth2spec.test.context import ( - PHASE0, PHASE1, + PHASE0, with_all_phases_except, spec_state_test, ) @@ -40,46 +40,46 @@ def run_sync_committee_sanity_test(spec, state, fraction_full=1.0): yield 'post', state -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test def test_full_sync_committee_committee(spec, state): next_epoch(spec, state) yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test def test_half_sync_committee_committee(spec, state): next_epoch(spec, state) yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test def test_empty_sync_committee_committee(spec, state): next_epoch(spec, state) yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test def test_full_sync_committee_committee_genesis(spec, state): yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test def 
test_half_sync_committee_committee_genesis(spec, state): yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test def test_empty_sync_committee_committee_genesis(spec, state): yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0) -@with_all_phases_except([PHASE0, PHASE1]) +@with_all_phases_except([PHASE0]) @spec_state_test def test_inactivity_scores(spec, state): for _ in range(spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2): diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index dcc8906b8..f37829ee8 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -331,6 +331,10 @@ def with_phases(phases, other_phases=None): return None run_phases = [phase] + if PHASE0 not in run_phases and ALTAIR not in run_phases: + dump_skipping_message("none of the recognized phases are executable, skipping test.") + return None + available_phases = set(run_phases) if other_phases is not None: available_phases |= set(other_phases) @@ -346,7 +350,8 @@ def with_phases(phases, other_phases=None): if ALTAIR in available_phases: phase_dir[ALTAIR] = spec_altair - # return is ignored whenever multiple phases are ran. If + # return is ignored whenever multiple phases are ran. 
+ # This return is for test generators to emit python generators (yielding test vector outputs) if PHASE0 in run_phases: ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw) if ALTAIR in run_phases: diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index 0b3e0484a..8e8002041 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -2,10 +2,9 @@ from lru import LRU from typing import List -from eth2spec.test.context import expect_assertion_error, PHASE1, is_post_altair +from eth2spec.test.context import expect_assertion_error, is_post_altair from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot from eth2spec.test.helpers.block import build_empty_block_for_next_slot -from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee from eth2spec.test.helpers.keys import privkeys from eth2spec.utils import bls from eth2spec.utils.ssz.ssz_typing import Bitlist @@ -51,7 +50,7 @@ def run_attestation_processing(spec, state, attestation, valid=True): yield 'post', state -def build_attestation_data(spec, state, slot, index, shard=None, shard_transition=None, on_time=True): +def build_attestation_data(spec, state, slot, index, shard=None, on_time=True): assert state.slot >= slot if slot == state.slot: @@ -82,32 +81,11 @@ def build_attestation_data(spec, state, slot, index, shard=None, shard_transitio target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root), ) - if spec.fork == PHASE1: - if shard is None: - shard = spec.compute_shard_from_committee_index(state, data.index, data.slot) - data.shard = shard - - if shard_transition is not None: - last_offset_index = len(shard_transition.shard_data_roots) - 1 - data.shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root - data.shard_transition_root = 
shard_transition.hash_tree_root() - else: - if on_time: - if data.slot == spec.GENESIS_SLOT: - data.shard_head_root = spec.Root() - data.shard_transition_root = spec.ShardTransition().hash_tree_root() - else: - shard_transition = spec.get_shard_transition(state, shard, shard_blocks=[]) - last_offset_index = len(shard_transition.shard_data_roots) - 1 - data.shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root - data.shard_transition_root = shard_transition.hash_tree_root() - else: - data.shard_head_root = state.shard_states[shard].latest_block_root - data.shard_transition_root = spec.Root() + # if spec.fork == SHARDING # TODO: add extra data for shard voting return data -def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_transition=None, signed=False): +def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=False): ''' Construct on-time attestation for next slot ''' @@ -121,13 +99,12 @@ def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_tran state, slot=slot, index=index, - shard_transition=shard_transition, signed=signed, on_time=True, ) -def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False, shard_transition=None): +def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False): ''' Construct on-time attestation for next slot ''' @@ -137,7 +114,7 @@ def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False, index = 0 return get_valid_attestation(spec, state, slot=slot, index=index, - signed=signed, on_time=False, shard_transition=shard_transition) + signed=signed, on_time=False) def get_valid_attestation(spec, @@ -145,7 +122,6 @@ def get_valid_attestation(spec, slot=None, index=None, filter_participant_set=None, - shard_transition=None, signed=False, on_time=True): # If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed. 
@@ -156,7 +132,7 @@ def get_valid_attestation(spec, index = 0 attestation_data = build_attestation_data( - spec, state, slot=slot, index=index, shard_transition=shard_transition, on_time=on_time + spec, state, slot=slot, index=index, on_time=on_time ) beacon_committee = spec.get_beacon_committee( @@ -258,16 +234,11 @@ def next_epoch_with_attestations(spec, committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest)) if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)): for index in range(committees_per_slot): - if spec.fork == PHASE1: - shard = spec.compute_shard_from_committee_index(post_state, index, slot_to_attest) - shard_transition = get_shard_transition_of_committee(spec, post_state, index) - block.body.shard_transitions[shard] = shard_transition - else: - shard_transition = None + # if spec.fork == SHARDING: TODO: add shard data to attestation, include shard headers in block cur_attestation = get_valid_attestation( spec, post_state, slot_to_attest, - shard_transition=shard_transition, index=index, signed=True, on_time=True + index=index, signed=True, on_time=True ) block.body.attestations.append(cur_attestation) diff --git a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py index 5c8ecb36c..e2dfcfe5b 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py @@ -16,6 +16,8 @@ def get_process_calls(spec): lambda state, block: for_ops(state, block.body.proposer_slashings, spec.process_proposer_slashing), 'process_attester_slashing': lambda state, block: for_ops(state, block.body.attester_slashings, spec.process_attester_slashing), + 'process_shard_header': + lambda state, block: for_ops(state, block.body.shard_headers, spec.process_shard_header), 'process_attestation': lambda state, block: for_ops(state, block.body.attestations, 
spec.process_attestation), 'process_deposit': @@ -25,12 +27,12 @@ def get_process_calls(spec): # Altair 'process_sync_committee': lambda state, block: spec.process_sync_committee(state, block.body.sync_aggregate), - # PHASE1 + # Merge + 'process_application_payload': + lambda state, block: spec.process_application_payload(state, block.body), + # Proof of custody 'process_custody_game_operations': lambda state, block: spec.process_custody_game_operations(state, block.body), - 'process_shard_transitions': - lambda state, block: spec.process_shard_transitions( - state, block.body.shard_transitions, block.body.attestations), } diff --git a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py index e8e3dd492..4d508d0ce 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py @@ -3,14 +3,20 @@ from eth2spec.test.context import is_post_altair def get_process_calls(spec): + # unrecognized processing functions will be ignored. + # This sums up the aggregate of processing functions of all phases. + # Note: make sure to explicitly remove/override a processing function in later phases, + # or the old function will stick around. 
return [ - # PHASE0 'process_justification_and_finalization', 'process_rewards_and_penalties', 'process_registry_updates', - 'process_reveal_deadlines', - 'process_challenge_deadlines', + 'process_reveal_deadlines', # proof of custody + 'process_challenge_deadlines', # proof of custody 'process_slashings', + 'process_pending_header.', # sharding + 'charge_confirmed_header_fees', # sharding + 'reset_pending_headers', # sharding 'process_eth1_data_reset', 'process_effective_balance_updates', 'process_slashings_reset', @@ -21,8 +27,7 @@ def get_process_calls(spec): 'process_participation_record_updates' ), 'process_sync_committee_updates', - # PHASE1 - 'process_phase_1_final_updates', + 'process_shard_epoch_increment' # sharding ] diff --git a/tests/core/pyspec/eth2spec/test/helpers/shard_transitions.py b/tests/core/pyspec/eth2spec/test/helpers/shard_transitions.py deleted file mode 100644 index d10d1ee7b..000000000 --- a/tests/core/pyspec/eth2spec/test/helpers/shard_transitions.py +++ /dev/null @@ -1,37 +0,0 @@ -from eth2spec.test.context import expect_assertion_error - - -def run_shard_transitions_processing(spec, state, shard_transitions, attestations, valid=True): - """ - Run ``process_shard_transitions``, yielding: - - pre-state ('pre') - - shard_transitions ('shard_transitions') - - attestations ('attestations') - - post-state ('post'). - If ``valid == False``, run expecting ``AssertionError`` - """ - # yield pre-state - yield 'pre', state - yield 'shard_transitions', shard_transitions - yield 'attestations', attestations - - # If the attestation is invalid, processing is aborted, and there is no post-state. 
- if not valid: - expect_assertion_error(lambda: spec.process_shard_transitions(state, shard_transitions, attestations)) - yield 'post', None - return - - # process crosslinks - spec.process_shard_transitions(state, shard_transitions, attestations) - - # yield post-state - yield 'post', state - - -def get_shard_transition_of_committee(spec, state, committee_index, shard_blocks=None): - if shard_blocks is None: - shard_blocks = [] - - shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot) - shard_transition = spec.get_shard_transition(state, shard, shard_blocks=shard_blocks) - return shard_transition diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py index 99a82879d..0f5eef407 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py @@ -2,12 +2,10 @@ from eth2spec.test.context import ( spec_state_test, always_bls, never_bls, with_all_phases, - with_all_phases_except, spec_test, low_balances, with_custom_state, single_phase, - PHASE1, ) from eth2spec.test.helpers.attestations import ( run_attestation_processing, @@ -380,7 +378,7 @@ def test_correct_after_epoch_delay(spec, state): # Incorrect head but correct source/target at different slot inclusions # -@with_all_phases_except([PHASE1]) +@with_all_phases @spec_state_test def test_incorrect_head_min_inclusion_delay(spec, state): attestation = get_valid_attestation(spec, state, signed=False) @@ -434,10 +432,7 @@ def test_incorrect_head_after_epoch_delay(spec, state): # Incorrect head and target but correct source at different slot inclusions # -# Note: current phase 1 spec checks -# `assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))` -# so this test can't pass that until phase 1 
refactor is merged -@with_all_phases_except([PHASE1]) +@with_all_phases @spec_state_test def test_incorrect_head_and_target_min_inclusion_delay(spec, state): attestation = get_valid_attestation(spec, state, signed=False) @@ -494,7 +489,7 @@ def test_incorrect_head_and_target_after_epoch_delay(spec, state): # Correct head and source but incorrect target at different slot inclusions # -@with_all_phases_except([PHASE1]) +@with_all_phases @spec_state_test def test_incorrect_target_min_inclusion_delay(spec, state): attestation = get_valid_attestation(spec, state, signed=False) diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py index f7c9b394e..cebe65e03 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( spec_state_test, spec_test, with_all_phases, single_phase, - with_phases, PHASE0, PHASE1, + with_phases, PHASE0, with_custom_state, zero_activation_threshold, misc_balances, low_single_balance, @@ -103,7 +103,7 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state): assert state.balances[index] == pre_state.balances[index] -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_full_attestations_random_incorrect_fields(spec, state): attestations = prepare_state_with_attestations(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py index 7871d3fcf..82dbccc52 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py +++ b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_basic.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, 
with_phases, spec_state_test +from eth2spec.test.context import PHASE0, with_all_phases, with_phases, spec_state_test import eth2spec.test.helpers.rewards as rewards_helpers @@ -32,7 +32,7 @@ def test_full_but_partial_participation(spec, state): yield from rewards_helpers.run_test_full_but_partial_participation(spec, state) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_one_attestation_one_correct(spec, state): yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state) @@ -75,7 +75,7 @@ def test_some_very_low_effective_balances_that_did_not_attest(spec, state): # -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_full_half_correct_target_incorrect_head(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( @@ -86,7 +86,7 @@ def test_full_half_correct_target_incorrect_head(spec, state): ) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_full_correct_target_incorrect_head(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( @@ -97,7 +97,7 @@ def test_full_correct_target_incorrect_head(spec, state): ) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_full_half_incorrect_target_incorrect_head(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( @@ -108,7 +108,7 @@ def test_full_half_incorrect_target_incorrect_head(spec, state): ) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_full_half_incorrect_target_correct_head(spec, state): yield from rewards_helpers.run_test_full_fraction_incorrect( @@ -119,31 +119,31 @@ def test_full_half_incorrect_target_correct_head(spec, state): ) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_full_delay_one_slot(spec, state): yield from rewards_helpers.run_test_full_delay_one_slot(spec, state) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) 
@spec_state_test def test_full_delay_max_slots(spec, state): yield from rewards_helpers.run_test_full_delay_max_slots(spec, state) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_full_mixed_delay(spec, state): yield from rewards_helpers.run_test_full_mixed_delay(spec, state) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_proposer_not_in_attestations(spec, state): yield from rewards_helpers.run_test_proposer_not_in_attestations(spec, state) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test def test_duplicate_attestations_at_later_slots(spec, state): yield from rewards_helpers.run_test_duplicate_attestations_at_later_slots(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py index b2ed6f5d8..f11b2734f 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py +++ b/tests/core/pyspec/eth2spec/test/phase0/rewards/test_leak.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, with_phases, spec_state_test +from eth2spec.test.context import PHASE0, with_all_phases, with_phases, spec_state_test from eth2spec.test.helpers.rewards import leaking import eth2spec.test.helpers.rewards as rewards_helpers @@ -38,7 +38,7 @@ def test_full_but_partial_participation_leak(spec, state): yield from rewards_helpers.run_test_full_but_partial_participation(spec, state) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test @leaking() def test_one_attestation_one_correct_leak(spec, state): @@ -87,7 +87,7 @@ def test_some_very_low_effective_balances_that_did_not_attest_leak(spec, state): # -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test @leaking() def test_full_half_correct_target_incorrect_head_leak(spec, state): @@ -99,7 +99,7 @@ def test_full_half_correct_target_incorrect_head_leak(spec, state): ) 
-@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test @leaking() def test_full_correct_target_incorrect_head_leak(spec, state): @@ -111,7 +111,7 @@ def test_full_correct_target_incorrect_head_leak(spec, state): ) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test @leaking() def test_full_half_incorrect_target_incorrect_head_leak(spec, state): @@ -123,7 +123,7 @@ def test_full_half_incorrect_target_incorrect_head_leak(spec, state): ) -@with_phases([PHASE0, PHASE1]) +@with_phases([PHASE0]) @spec_state_test @leaking() def test_full_half_incorrect_target_correct_head_leak(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py index 8af0411c4..3efe07b01 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py @@ -20,14 +20,13 @@ from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits -from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee from eth2spec.test.helpers.multi_operations import ( run_slash_and_exit, run_test_full_random_operations, ) from eth2spec.test.context import ( - PHASE0, PHASE1, MINIMAL, + PHASE0, MINIMAL, spec_test, spec_state_test, dump_skipping_message, with_phases, with_all_phases, single_phase, expect_assertion_error, always_bls, @@ -563,7 +562,7 @@ def test_duplicate_attester_slashing(spec, state): yield 'post', None -# All AttesterSlashing tests should be adopted for Phase 1 but helper support is not yet there +# TODO All AttesterSlashing tests should be adopted for SHARDING and later but helper support is not yet there @with_all_phases @spec_state_test @@ -770,16 
+769,10 @@ def test_attestation(spec, state): attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) index = 0 - if spec.fork == PHASE1: - shard = spec.compute_shard_from_committee_index(state, index, state.slot) - shard_transition = get_shard_transition_of_committee(spec, state, index) - attestation_block.body.shard_transitions[shard] = shard_transition - else: - shard_transition = None + # if spec.fork == SHARDING: + # TODO add shard data to block to vote on - attestation = get_valid_attestation( - spec, state, shard_transition=shard_transition, index=index, signed=True, on_time=True - ) + attestation = get_valid_attestation(spec, state, index=index, signed=True, on_time=True) if not is_post_altair(spec): pre_current_attestations_len = len(state.current_epoch_attestations) @@ -810,9 +803,10 @@ def test_attestation(spec, state): assert spec.hash_tree_root(state.previous_epoch_participation) == pre_current_epoch_participation_root -# In phase1 a committee is computed for SHARD_COMMITTEE_PERIOD slots ago, +# After SHARDING is enabled, a committee is computed for SHARD_COMMITTEE_PERIOD slots ago, # exceeding the minimal-config randao mixes memory size. # Applies to all voluntary-exit sanity block tests. +# TODO: when integrating SHARDING tests, voluntary-exit tests may need to change. 
@with_all_phases @spec_state_test diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py index 926ebec80..4d5f1ca16 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_attestation.py @@ -1,4 +1,4 @@ -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR, with_all_phases, spec_state_test +from eth2spec.test.context import PHASE0, ALTAIR, with_all_phases, spec_state_test from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot @@ -23,16 +23,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True): epoch=attestation.data.target.epoch, root=attestation.data.beacon_block_root, ) - elif spec.fork == PHASE1: - latest_message = spec.LatestMessage( - epoch=attestation.data.target.epoch, - root=attestation.data.beacon_block_root, - ) - shard_latest_message = spec.ShardLatestMessage( - epoch=attestation.data.target.epoch, - root=attestation.data.shard_head_root, - ) - assert store.shard_stores[attestation.data.shard].latest_messages[sample_index] == shard_latest_message + # elif spec.fork == SHARDING: TODO: check if vote count for shard blob increased as expected assert ( store.latest_messages[sample_index] == latest_message diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py index 1783a5bec..2765d2980 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py +++ 
b/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py @@ -9,6 +9,7 @@ from eth2spec.test.helpers.keys import privkeys from eth2spec.utils.ssz.ssz_typing import ByteList from eth2spec.test.helpers.state import get_balance, transition_to from eth2spec.test.context import ( + MINIMAL, PROOF_OF_CUSTODY, with_phases, spec_state_test, diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py index 4dae918ef..b9afbc0eb 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py @@ -5,7 +5,6 @@ from eth2spec.test.context import ( MINIMAL, with_phases, spec_state_test, - only_full_crosslink, with_configs, ) from eth2spec.test.helpers.attestations import get_valid_on_time_attestation @@ -49,7 +48,6 @@ def run_beacon_block(spec, state, block, valid=True): @with_phases([PROOF_OF_CUSTODY]) @spec_state_test -@only_full_crosslink def test_with_shard_transition_with_custody_challenge_and_response(spec, state): transition_to_valid_shard_slot(spec, state) @@ -108,7 +106,6 @@ def test_early_derived_secret_reveal(spec, state): @with_phases([PROOF_OF_CUSTODY]) @spec_state_test -@only_full_crosslink def test_custody_slashing(spec, state): transition_to_valid_shard_slot(spec, state) From c225813f7d69a7c5daa50818632b0819bf4c7f4d Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 27 Mar 2021 00:46:04 +0100 Subject: [PATCH 110/127] split phase1 configs into feature configs --- configs/mainnet/merge.yaml | 7 ++ configs/mainnet/phase1.yaml | 101 ------------------------- configs/mainnet/proof_of_custody.yaml | 48 ++++++++++++ configs/mainnet/sharding.yaml | 43 +++++++++++ configs/minimal/merge.yaml | 7 ++ configs/minimal/phase1.yaml | 105 -------------------------- configs/minimal/proof_of_custody.yaml | 48 ++++++++++++ 
configs/minimal/sharding.yaml | 44 +++++++++++ 8 files changed, 197 insertions(+), 206 deletions(-) create mode 100644 configs/mainnet/merge.yaml delete mode 100644 configs/mainnet/phase1.yaml create mode 100644 configs/mainnet/proof_of_custody.yaml create mode 100644 configs/mainnet/sharding.yaml create mode 100644 configs/minimal/merge.yaml delete mode 100644 configs/minimal/phase1.yaml create mode 100644 configs/minimal/proof_of_custody.yaml create mode 100644 configs/minimal/sharding.yaml diff --git a/configs/mainnet/merge.yaml b/configs/mainnet/merge.yaml new file mode 100644 index 000000000..b9924d71d --- /dev/null +++ b/configs/mainnet/merge.yaml @@ -0,0 +1,7 @@ +# Mainnet preset - The Merge + +# Fork +# --------------------------------------------------------------- +MERGE_FORK_VERSION: 0x02000000 +# TBD +MERGE_FORK_SLOT: 0 diff --git a/configs/mainnet/phase1.yaml b/configs/mainnet/phase1.yaml deleted file mode 100644 index 1d689f2b5..000000000 --- a/configs/mainnet/phase1.yaml +++ /dev/null @@ -1,101 +0,0 @@ -# Mainnet preset - phase 1 - -CONFIG_NAME: "mainnet" - -# phase1-fork -# --------------------------------------------------------------- -PHASE_1_FORK_VERSION: 0x01000000 -# [STUB] -PHASE_1_FORK_SLOT: 0 -INITIAL_ACTIVE_SHARDS: 64 - - -# beacon-chain -# --------------------------------------------------------------- -# Misc -# 2**10 (= 1,024) -MAX_SHARDS: 1024 -# 2**7 (= 128) -LIGHT_CLIENT_COMMITTEE_SIZE: 128 -# 2**3 (= 8) -GASPRICE_ADJUSTMENT_COEFFICIENT: 8 - -# Shard block configs -# 2**20 (= 1048,576) bytes -MAX_SHARD_BLOCK_SIZE: 1048576 -# 2**18 (= 262,144) bytes -TARGET_SHARD_BLOCK_SIZE: 262144 -# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length. 
-SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] -# len(SHARD_BLOCK_OFFSETS) -MAX_SHARD_BLOCKS_PER_ATTESTATION: 12 -# 2**12 (= 4,096) -BYTES_PER_CUSTODY_CHUNK: 4096 -# ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK) -CUSTODY_RESPONSE_DEPTH: 8 - -# Gwei values -# 2**14 (= 16,384) Gwei -MAX_GASPRICE: 16384 -# 2**3 (= 8) Gwei -MIN_GASPRICE: 8 - -# Time parameters -# 2**3 (= 8) | online epochs -ONLINE_PERIOD: 8 -# 2**8 (= 256) | epochs -LIGHT_CLIENT_COMMITTEE_PERIOD: 256 - -# Max operations per block -# 2**20 (= 1,048,576) -MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS: 1048576 - -# Domain types -DOMAIN_SHARD_PROPOSAL: 0x80000000 -DOMAIN_SHARD_COMMITTEE: 0x81000000 -DOMAIN_LIGHT_CLIENT: 0x82000000 -# custody-game spec -DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000 -DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000 -DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000 - -# custody-game -# --------------------------------------------------------------- -# Time parameters -# 2**1 (= 2) epochs, 12.8 minutes -RANDAO_PENALTY_EPOCHS: 2 -# 2**15 (= 32,768) epochs, ~146 days -EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 32768 -# 2**14 (= 16,384) epochs ~73 days -EPOCHS_PER_CUSTODY_PERIOD: 16384 -# 2**11 (= 2,048) epochs, ~9 days -CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048 -# 2**15 (= 32,768) epochs, ~146 days -MAX_CHUNK_CHALLENGE_DELAY: 32768 - -# Misc parameters -# 2**256 - 189 -CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747 -# 3 -CUSTODY_SECRETS: 3 -# 2**5 (= 32) bytes -BYTES_PER_CUSTODY_ATOM: 32 -# 1/1024 chance of custody bit 1 -CUSTODY_PROBABILITY_EXPONENT: 10 - -# Max operations -# 2**8 (= 256) -MAX_CUSTODY_KEY_REVEALS: 256 -# 2**0 (= 1) -MAX_EARLY_DERIVED_SECRET_REVEALS: 1 -# 2**2 (= 2) -MAX_CUSTODY_CHUNK_CHALLENGES: 4 -# 2** 4 (= 16) -MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 16 -# 2**0 (= 1) -MAX_CUSTODY_SLASHINGS: 1 - -# Reward and penalty quotients -EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2 -# 2**8 (= 256) 
-MINOR_REWARD_QUOTIENT: 256 diff --git a/configs/mainnet/proof_of_custody.yaml b/configs/mainnet/proof_of_custody.yaml new file mode 100644 index 000000000..eb299c621 --- /dev/null +++ b/configs/mainnet/proof_of_custody.yaml @@ -0,0 +1,48 @@ +# Mainnet preset - Proof of Custody + +# Time parameters +# --------------------------------------------------------------- +# 2**1 (= 2) epochs, 12.8 minutes +RANDAO_PENALTY_EPOCHS: 2 +# 2**15 (= 32,768) epochs, ~146 days +EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 32768 +# 2**14 (= 16,384) epochs ~73 days +EPOCHS_PER_CUSTODY_PERIOD: 16384 +# 2**11 (= 2,048) epochs, ~9 days +CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048 +# 2**15 (= 32,768) epochs, ~146 days +MAX_CHUNK_CHALLENGE_DELAY: 32768 + +# Misc parameters +# --------------------------------------------------------------- +# 2**256 - 189 +CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747 +# 3 +CUSTODY_SECRETS: 3 +# 1/1024 chance of custody bit 1 +CUSTODY_PROBABILITY_EXPONENT: 10 + +# Max operations +# --------------------------------------------------------------- +# 2**8 (= 256) +MAX_CUSTODY_KEY_REVEALS: 256 +# 2**0 (= 1) +MAX_EARLY_DERIVED_SECRET_REVEALS: 1 +# 2**2 (= 2) +MAX_CUSTODY_CHUNK_CHALLENGES: 4 +# 2** 4 (= 16) +MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 16 +# 2**0 (= 1) +MAX_CUSTODY_SLASHINGS: 1 + +# Reward and penalty quotients +# --------------------------------------------------------------- +EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2 +# 2**8 (= 256) +MINOR_REWARD_QUOTIENT: 256 + +# Signature domains +# --------------------------------------------------------------- +DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000 +DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000 +DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000 diff --git a/configs/mainnet/sharding.yaml b/configs/mainnet/sharding.yaml new file mode 100644 index 000000000..099a71442 --- /dev/null +++ b/configs/mainnet/sharding.yaml @@ -0,0 +1,43 @@ +# Mainnet preset - Sharding + +# 
Fork +# --------------------------------------------------------------- +SHARDING_FORK_VERSION: 0x03000000 +# TBD +SHARDING_FORK_SLOT: 0 + + +# Beacon-chain +# --------------------------------------------------------------- +# Misc +# 2**10 (= 1,024) +MAX_SHARDS: 1024 +# 2**6 = 64 +INITIAL_ACTIVE_SHARDS: 64 +# 2**3 (= 8) +GASPRICE_ADJUSTMENT_COEFFICIENT: 8 + +# Shard block configs +# --------------------------------------------------------------- +MAX_SHARD_HEADERS_PER_SHARD: 4 +# 2**11 (= 2,048) +MAX_SAMPLES_PER_BLOCK: 2048 +# 2**10 (= 1,1024) +TARGET_SAMPLES_PER_BLOCK: 1024 + +# Gwei values +# --------------------------------------------------------------- +# 2**33 (= 8,589,934,592) Gwei +MAX_GASPRICE: 8589934592 +# 2**3 (= 8) Gwei +MIN_GASPRICE: 8 + +# Time parameters +# --------------------------------------------------------------- +# 2**8 (= 256) | epochs +SHARD_COMMITTEE_PERIOD: 256 + +# Signature domains +# --------------------------------------------------------------- +DOMAIN_SHARD_PROPOSAL: 0x80000000 +DOMAIN_SHARD_COMMITTEE: 0x81000000 \ No newline at end of file diff --git a/configs/minimal/merge.yaml b/configs/minimal/merge.yaml new file mode 100644 index 000000000..f36caa832 --- /dev/null +++ b/configs/minimal/merge.yaml @@ -0,0 +1,7 @@ +# Minimal preset - The Merge + +# Fork +# --------------------------------------------------------------- +MERGE_FORK_VERSION: 0x02000001 +# TBD +MERGE_FORK_SLOT: 0 diff --git a/configs/minimal/phase1.yaml b/configs/minimal/phase1.yaml deleted file mode 100644 index cf17ba52a..000000000 --- a/configs/minimal/phase1.yaml +++ /dev/null @@ -1,105 +0,0 @@ -# Minimal preset - phase 1 - -CONFIG_NAME: "minimal" - -# phase1-fork -# --------------------------------------------------------------- -# [customized] for testnet distinction -PHASE_1_FORK_VERSION: 0x01000001 -# [STUB] -PHASE_1_FORK_SLOT: 0 -# [customized] reduced for testing -INITIAL_ACTIVE_SHARDS: 2 - - -# beacon-chain -# 
--------------------------------------------------------------- -# Misc -# [customized] reduced for testing -MAX_SHARDS: 8 -# 2**7 (= 128) -LIGHT_CLIENT_COMMITTEE_SIZE: 128 -# 2**3 (= 8) -GASPRICE_ADJUSTMENT_COEFFICIENT: 8 - -# Shard block configs -# 2**20 (= 1048,576) bytes -MAX_SHARD_BLOCK_SIZE: 1048576 -# 2**18 (= 262,144) bytes -TARGET_SHARD_BLOCK_SIZE: 262144 -# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length. -SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] -# len(SHARD_BLOCK_OFFSETS) -MAX_SHARD_BLOCKS_PER_ATTESTATION: 12 -# 2**12 (= 4,096) -BYTES_PER_CUSTODY_CHUNK: 4096 -# ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK) -CUSTODY_RESPONSE_DEPTH: 8 - -# Gwei values -# 2**14 (= 16,384) Gwei -MAX_GASPRICE: 16384 -# 2**3 (= 8) Gwei -MIN_GASPRICE: 8 - -# Time parameters -# 2**3 (= 8) | online epochs -ONLINE_PERIOD: 8 -# 2**8 (= 256) | epochs -LIGHT_CLIENT_COMMITTEE_PERIOD: 256 - -# Max operations per block -# 2**20 (= 1,048,576) -MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS: 1048576 - -# Domain types -DOMAIN_SHARD_PROPOSAL: 0x80000000 -DOMAIN_SHARD_COMMITTEE: 0x81000000 -DOMAIN_LIGHT_CLIENT: 0x82000000 -# custody-game spec -DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000 -DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000 -DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000 - -# custody-game -# --------------------------------------------------------------- -# Time parameters -# 2**1 (= 2) epochs -RANDAO_PENALTY_EPOCHS: 2 -# [customized] quicker for testing -EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 64 -# [customized] quicker for testing -EPOCHS_PER_CUSTODY_PERIOD: 32 -# [customized] quicker for testing -CUSTODY_PERIOD_TO_RANDAO_PADDING: 8 -# [customize for faster testing] -MAX_CHUNK_CHALLENGE_DELAY: 64 - - -# Misc parameters -# 2**256 - 189 -CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747 -# 3 -CUSTODY_SECRETS: 3 -# 2**5 (= 32) bytes -BYTES_PER_CUSTODY_ATOM: 32 -# 1/4 chance of custody bit 1 
[customized for faster testing] -CUSTODY_PROBABILITY_EXPONENT: 2 - - -# Max operations -# 2**8 (= 256) -MAX_CUSTODY_KEY_REVEALS: 256 -# 2**0 (= 1) -MAX_EARLY_DERIVED_SECRET_REVEALS: 1 -# [customized] -MAX_CUSTODY_CHUNK_CHALLENGES: 2 -# [customized] -MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 8 -# 2**0 (= 1) -MAX_CUSTODY_SLASHINGS: 1 - -# Reward and penalty quotients -EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2 -# 2**8 (= 256) -MINOR_REWARD_QUOTIENT: 256 diff --git a/configs/minimal/proof_of_custody.yaml b/configs/minimal/proof_of_custody.yaml new file mode 100644 index 000000000..67394c35c --- /dev/null +++ b/configs/minimal/proof_of_custody.yaml @@ -0,0 +1,48 @@ +# Minimal preset - Proof of Custody + +# Time parameters +# --------------------------------------------------------------- +# 2**1 (= 2) epochs, 12.8 minutes +RANDAO_PENALTY_EPOCHS: 2 +# [customized] quicker for testing +EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 64 +# [customized] quicker for testing +EPOCHS_PER_CUSTODY_PERIOD: 32 +# [customized] quicker for testing +CUSTODY_PERIOD_TO_RANDAO_PADDING: 8 +# [customize for faster testing] +MAX_CHUNK_CHALLENGE_DELAY: 64 + +# Misc parameters +# --------------------------------------------------------------- +# 2**256 - 189 +CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747 +# 3 +CUSTODY_SECRETS: 3 +# 1/4 chance of custody bit 1 [customized for faster testing] +CUSTODY_PROBABILITY_EXPONENT: 2 + +# Max operations +# --------------------------------------------------------------- +# 2**8 (= 256) +MAX_CUSTODY_KEY_REVEALS: 256 +# 2**0 (= 1) +MAX_EARLY_DERIVED_SECRET_REVEALS: 1 +# [customized] +MAX_CUSTODY_CHUNK_CHALLENGES: 2 +# [customized] +MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 8 +# 2**0 (= 1) +MAX_CUSTODY_SLASHINGS: 1 + +# Reward and penalty quotients +# --------------------------------------------------------------- +EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2 +# 2**8 (= 256) +MINOR_REWARD_QUOTIENT: 256 + +# 
Signature domains +# --------------------------------------------------------------- +DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000 +DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000 +DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000 diff --git a/configs/minimal/sharding.yaml b/configs/minimal/sharding.yaml new file mode 100644 index 000000000..1b73a5769 --- /dev/null +++ b/configs/minimal/sharding.yaml @@ -0,0 +1,44 @@ +# Minimal preset - Sharding + +# Fork +# --------------------------------------------------------------- +SHARDING_FORK_VERSION: 0x03000000 +# TBD +SHARDING_FORK_SLOT: 0 + + +# Beacon-chain +# --------------------------------------------------------------- +# Misc +# [customized] reduced for testing +MAX_SHARDS: 8 +# [customized] reduced for testing +INITIAL_ACTIVE_SHARDS: 2 +# 2**3 (= 8) +GASPRICE_ADJUSTMENT_COEFFICIENT: 8 + +# Shard block configs +# --------------------------------------------------------------- +MAX_SHARD_HEADERS_PER_SHARD: 4 +# 2**11 (= 2,048) +MAX_SAMPLES_PER_BLOCK: 2048 +# 2**10 (= 1,1024) +TARGET_SAMPLES_PER_BLOCK: 1024 + +# Gwei values +# --------------------------------------------------------------- +# 2**33 (= 8,589,934,592) Gwei +MAX_GASPRICE: 8589934592 +# 2**3 (= 8) Gwei +MIN_GASPRICE: 8 + +# Time parameters +# --------------------------------------------------------------- +# 2**8 (= 256) | epochs +SHARD_COMMITTEE_PERIOD: 256 + +# Signature domains +# --------------------------------------------------------------- +DOMAIN_SHARD_PROPOSAL: 0x80000000 +DOMAIN_SHARD_COMMITTEE: 0x81000000 +DOMAIN_LIGHT_CLIENT: 0x82000000 \ No newline at end of file From 379ba98eb756431e0272738c5917e47891a9ee58 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 27 Mar 2021 00:47:10 +0100 Subject: [PATCH 111/127] move static sharding configuration into constants --- specs/sharding/beacon-chain.md | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 
dfb04963b..c376e1377 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -10,6 +10,7 @@ - [Introduction](#introduction) - [Custom types](#custom-types) +- [Constants](#constants) - [Configuration](#configuration) - [Misc](#misc) - [Shard block configs](#shard-block-configs) @@ -72,6 +73,17 @@ We define the following Python custom types for type hinting and readability: | `BLSCommitment` | `bytes48` | A G1 curve point | | `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` | +## Constants + +The following values are (non-configurable) constants used throughout the specification. + +| Name | Value | Notes | +| - | - | - | +| `PRIMITIVE_ROOT_OF_UNITY` | `5` | Primitive root of unity of the BLS12_381 (inner) modulus | +| `DATA_AVAILABILITY_INVERSE_CODING_RATE` | `2**1` (=2) | Factor by which samples are extended for data availability encoding | +| `POINTS_PER_SAMPLE` | `uint64(2**3)` (= 8) | 31 * 8 = 248 bytes | +| `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) | + ## Configuration ### Misc @@ -82,16 +94,12 @@ We define the following Python custom types for type hinting and readability: | `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count | | `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | -| `MAX_SHARD_HEADERS` | `MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD` | | -| `PRIMITIVE_ROOT_OF_UNITY` | `5` | Primitive root of unity of the BLS12_381 (inner) modulus | -| `DATA_AVAILABILITY_INVERSE_CODING_RATE` | `2**1` (=2) | Factor by which samples are extended for data availability encoding | ### Shard block configs | Name | Value | Notes | | - | - | - | -| `POINTS_PER_SAMPLE` | `uint64(2**3)` (= 8) | 31 * 8 = 248 bytes | | `MAX_SAMPLES_PER_BLOCK` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes | | 
`TARGET_SAMPLES_PER_BLOCK` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes | @@ -101,7 +109,6 @@ We define the following Python custom types for type hinting and readability: | - | - | | `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. | | `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` | -| `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) | | `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // (MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE, MODULUS)` | ### Gwei values @@ -148,7 +155,7 @@ class AttestationData(Container): ```python class BeaconBlockBody(phase0.BeaconBlockBody): # insert phase 0 fields - shard_headers: List[SignedShardHeader, MAX_SHARD_HEADERS] + shard_headers: List[SignedShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD] ``` ### `BeaconState` @@ -159,8 +166,8 @@ class BeaconState(phase0.BeaconState): previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # New fields - previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] - current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARD_HEADERS * SLOTS_PER_EPOCH] + previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] + current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] shard_gasprice: uint64 current_epoch_start_shard: Shard From 29f78a7f06dfc2fae0ebccc7d8ca8858c923b1ad Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 27 Mar 2021 00:53:25 +0100 Subject: [PATCH 112/127] proof of custody full name 
--- README.md | 4 ++-- specs/{custody => proof_of_custody}/custody-game.md | 0 specs/{custody => proof_of_custody}/validator.md | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename specs/{custody => proof_of_custody}/custody-game.md (100%) rename specs/{custody => proof_of_custody}/validator.md (100%) diff --git a/README.md b/README.md index 1bae65f96..c1f09f675 100644 --- a/README.md +++ b/README.md @@ -39,8 +39,8 @@ Sharding follows the merge, and is divided into three parts: * [Beacon Chain changes](specs/sharding/beacon-chain.md) * [P2P Network changes](specs/sharding/p2p-interface.md) * Proof of Custody - * [Custody Game](specs/custody/custody-game.md) - * [Validator custody work](specs/custody/validator.md) + * [Custody Game](specs/proof_of_custody/custody-game.md) + * [Validator custody work](specs/proof_of_custody/validator.md) * Data Availability Sampling * Technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD). * [Core types and functions](specs/das/das-core.md) diff --git a/specs/custody/custody-game.md b/specs/proof_of_custody/custody-game.md similarity index 100% rename from specs/custody/custody-game.md rename to specs/proof_of_custody/custody-game.md diff --git a/specs/custody/validator.md b/specs/proof_of_custody/validator.md similarity index 100% rename from specs/custody/validator.md rename to specs/proof_of_custody/validator.md From d067237e302ac47f07c53a28d6cba71b859a8e46 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 27 Mar 2021 01:10:13 +0100 Subject: [PATCH 113/127] list new Merge specs, update features descriptions --- README.md | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index c1f09f675..290ca1084 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,10 @@ This repository hosts the current Eth2 specifications. 
Discussions about design [![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec) -Core specifications for Eth2 clients be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are: +Core specifications for Eth2 clients be found in [specs](specs/). These are divided into features. +Features are researched and developed in parallel, and then consolidated into sequential upgrades when ready. + +The current features are: ### Phase 0 @@ -29,19 +32,28 @@ Core specifications for Eth2 clients be found in [specs](specs/). These are divi ### Merge -The merge is still actively in R&D; see an [ethresear.ch](https://ethresear.ch) post describing the proposed basic mechanism [here](https://ethresear.ch/t/the-eth1-eth2-transition/6265) and the section of [ethereum.org](https://ethereum.org) describing the merge at a high level [here](https://ethereum.org/en/eth2/docking/). +The merge is still actively in R&D. The specifications outline a general direction for engineering work, +while the details are in review and may change. 
+ +* Background material: + * An [ethresear.ch](https://ethresear.ch) post [describing the basic mechanism](https://ethresear.ch/t/the-eth1-eth2-transition/6265) + * [ethereum.org](https://ethereum.org) high-level description of the merge [here](https://ethereum.org/en/eth2/docking/) +* Specifications: + * [Beacon Chain changes](specs/merge/beacon-chain.md) + * [Fork Choice changes](specs/merge/fork-choice.md) + * [Validator additions](specs/merge/validator.md) ### Sharding Sharding follows the merge, and is divided into three parts: -* Sharding base functionality +* Sharding base functionality - In early engineering phase * [Beacon Chain changes](specs/sharding/beacon-chain.md) * [P2P Network changes](specs/sharding/p2p-interface.md) -* Proof of Custody +* Proof of Custody - Ready, dependent on sharding * [Custody Game](specs/proof_of_custody/custody-game.md) * [Validator custody work](specs/proof_of_custody/validator.md) -* Data Availability Sampling +* Data Availability Sampling - In active R&D * Technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD). 
* [Core types and functions](specs/das/das-core.md) * [P2P Networking](specs/das/p2p-interface.md) From 5e186fc1365696595336ea9f37264bc2f8e68832 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 27 Mar 2021 01:28:52 +0100 Subject: [PATCH 114/127] update sharding spec to extend Merge types --- specs/sharding/beacon-chain.md | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index c376e1377..21a938663 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -153,19 +153,18 @@ class AttestationData(Container): ### `BeaconBlockBody` ```python -class BeaconBlockBody(phase0.BeaconBlockBody): - # insert phase 0 fields +class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body] shard_headers: List[SignedShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD] ``` ### `BeaconState` ```python -class BeaconState(phase0.BeaconState): - # Updated fields +class BeaconState(merge.BeaconState): # [extends The Merge block body] + # [Updated fields] previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] - # New fields + # [New fields] previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] @@ -420,8 +419,8 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) process_randao(state, block.body) process_eth1_data(state, block.body) - process_light_client_aggregate(state, block.body) - process_operations(state, block.body) + process_operations(state, block.body) # [Modified] + 
process_application_payload(state, block.body) # [Part of the Merge] ``` #### Operations @@ -548,7 +547,7 @@ The goal is to ensure that a proof can only be constructed if `deg(B) < l` (ther ### Epoch transition -This epoch transition overrides the phase0 epoch transition: +This epoch transition overrides the Merge epoch transition: ```python def process_epoch(state: BeaconState) -> None: From 2bc3e814b0c6c085cf0af1a56fe17eb486e64969 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 27 Mar 2021 01:42:23 +0100 Subject: [PATCH 115/127] rename to custody game --- README.md | 6 ++--- ...roof_of_custody.yaml => custody_game.yaml} | 2 +- ...roof_of_custody.yaml => custody_game.yaml} | 2 +- .../custody-game.md | 0 .../validator.md | 0 tests/core/pyspec/eth2spec/test/context.py | 6 ++--- .../__init__.py | 0 .../block_processing/__init__.py | 0 .../test_process_attestation.py | 6 ++--- .../test_process_chunk_challenge.py | 24 +++++++++---------- .../test_process_custody_key_reveal.py | 12 +++++----- .../test_process_custody_slashing.py | 12 +++++----- ...est_process_early_derived_secret_reveal.py | 18 +++++++------- .../epoch_processing/__init__.py | 0 .../test_process_challenge_deadlines.py | 6 ++--- .../test_process_custody_final_updates.py | 14 +++++------ .../test_process_reveal_deadlines.py | 8 +++---- .../sanity/__init__.py | 0 .../sanity/test_blocks.py | 10 ++++---- .../eth2spec/test/helpers/block_processing.py | 2 +- .../eth2spec/test/helpers/epoch_processing.py | 4 ++-- 21 files changed, 66 insertions(+), 66 deletions(-) rename configs/mainnet/{proof_of_custody.yaml => custody_game.yaml} (97%) rename configs/minimal/{proof_of_custody.yaml => custody_game.yaml} (97%) rename specs/{proof_of_custody => custody_game}/custody-game.md (100%) rename specs/{proof_of_custody => custody_game}/validator.md (100%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => 
custody_game}/block_processing/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/block_processing/test_process_attestation.py (90%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/block_processing/test_process_chunk_challenge.py (97%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/block_processing/test_process_custody_key_reveal.py (93%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/block_processing/test_process_custody_slashing.py (96%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/block_processing/test_process_early_derived_secret_reveal.py (93%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/epoch_processing/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/epoch_processing/test_process_challenge_deadlines.py (93%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/epoch_processing/test_process_custody_final_updates.py (95%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/epoch_processing/test_process_reveal_deadlines.py (91%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/sanity/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{proof_of_custody => custody_game}/sanity/test_blocks.py (97%) diff --git a/README.md b/README.md index 290ca1084..8cb545cfa 100644 --- a/README.md +++ b/README.md @@ -50,9 +50,9 @@ Sharding follows the merge, and is divided into three parts: * Sharding base functionality - In early engineering phase * [Beacon Chain changes](specs/sharding/beacon-chain.md) * [P2P Network changes](specs/sharding/p2p-interface.md) -* Proof of Custody - Ready, dependent on sharding - * [Custody Game](specs/proof_of_custody/custody-game.md) - * [Validator custody work](specs/proof_of_custody/validator.md) +* Custody Game - Ready, dependent on sharding + * 
[Custody Game](specs/custody_game/custody-game.md) + * [Validator custody work](specs/custody_game/validator.md) * Data Availability Sampling - In active R&D * Technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD). * [Core types and functions](specs/das/das-core.md) diff --git a/configs/mainnet/proof_of_custody.yaml b/configs/mainnet/custody_game.yaml similarity index 97% rename from configs/mainnet/proof_of_custody.yaml rename to configs/mainnet/custody_game.yaml index eb299c621..ecb2dc377 100644 --- a/configs/mainnet/proof_of_custody.yaml +++ b/configs/mainnet/custody_game.yaml @@ -1,4 +1,4 @@ -# Mainnet preset - Proof of Custody +# Mainnet preset - Custody Game # Time parameters # --------------------------------------------------------------- diff --git a/configs/minimal/proof_of_custody.yaml b/configs/minimal/custody_game.yaml similarity index 97% rename from configs/minimal/proof_of_custody.yaml rename to configs/minimal/custody_game.yaml index 67394c35c..8b8992fb6 100644 --- a/configs/minimal/proof_of_custody.yaml +++ b/configs/minimal/custody_game.yaml @@ -1,4 +1,4 @@ -# Minimal preset - Proof of Custody +# Minimal preset - Custody Game # Time parameters # --------------------------------------------------------------- diff --git a/specs/proof_of_custody/custody-game.md b/specs/custody_game/custody-game.md similarity index 100% rename from specs/proof_of_custody/custody-game.md rename to specs/custody_game/custody-game.md diff --git a/specs/proof_of_custody/validator.md b/specs/custody_game/validator.md similarity index 100% rename from specs/proof_of_custody/validator.md rename to specs/custody_game/validator.md diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index f37829ee8..02968a266 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -32,7 +32,7 @@ ALTAIR = SpecForkName('altair') # Experimental phases (not included in 
default "ALL_PHASES"): MERGE = SpecForkName('merge') SHARDING = SpecForkName('sharding') -PROOF_OF_CUSTODY = SpecForkName('proof_of_custody') +CUSTODY_GAME = SpecForkName('custody_game') DAS = SpecForkName('das') ALL_PHASES = (PHASE0, ALTAIR) @@ -357,7 +357,7 @@ def with_phases(phases, other_phases=None): if ALTAIR in run_phases: ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw) - # TODO: merge, sharding, proof_of_custody and das are not executable yet. + # TODO: merge, sharding, custody_game and das are not executable yet. # Tests that specify these features will not run, and get ignored for these specific phases. return ret return wrapper @@ -383,6 +383,6 @@ def with_configs(configs, reason=None): def is_post_altair(spec): # TODO: everything runs in parallel to Altair. # After features are rebased on the Altair fork, this can be reduced to just PHASE0. - if spec.fork in [PHASE0, MERGE, SHARDING, PROOF_OF_CUSTODY, DAS]: + if spec.fork in [PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS]: return False return True diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/__init__.py b/tests/core/pyspec/eth2spec/test/custody_game/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/__init__.py rename to tests/core/pyspec/eth2spec/test/custody_game/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/__init__.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/__init__.py rename to tests/core/pyspec/eth2spec/test/custody_game/block_processing/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py similarity index 90% rename from 
tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py rename to tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py index 1603d8f88..92633a8c5 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py @@ -1,5 +1,5 @@ from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, with_phases, spec_state_test, always_bls, @@ -12,7 +12,7 @@ from eth2spec.test.helpers.attestations import ( ) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_on_time_success(spec, state): @@ -23,7 +23,7 @@ def test_on_time_success(spec, state): yield from run_attestation_processing(spec, state, attestation) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_late_success(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py similarity index 97% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py rename to tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py index 61e567504..69143812f 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_chunk_challenge.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py @@ -8,7 +8,7 @@ from eth2spec.test.helpers.attestations import ( ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, MINIMAL, expect_assertion_error, disable_process_reveal_deadlines, @@ -68,7 +68,7 @@ def 
run_custody_chunk_response_processing(spec, state, custody_response, valid=T yield 'post', state -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @with_configs([MINIMAL], reason="too slow") @disable_process_reveal_deadlines @@ -92,7 +92,7 @@ def test_challenge_appended(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -118,7 +118,7 @@ def test_challenge_empty_element_replaced(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -144,7 +144,7 @@ def test_duplicate_challenge(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge, valid=False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -172,7 +172,7 @@ def test_second_challenge(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge1) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -197,7 +197,7 @@ def test_multiple_epochs_custody(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -222,7 +222,7 @@ def test_many_epochs_custody(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], 
reason="too slow") @@ -243,7 +243,7 @@ def test_off_chain_attestation(spec, state): yield from run_chunk_challenge_processing(spec, state, challenge) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -275,7 +275,7 @@ def test_custody_response(spec, state): yield from run_custody_chunk_response_processing(spec, state, custody_response) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -306,7 +306,7 @@ def test_custody_response_chunk_index_2(spec, state): yield from run_custody_chunk_response_processing(spec, state, custody_response) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -338,7 +338,7 @@ def test_custody_response_multiple_epochs(spec, state): yield from run_custody_chunk_response_processing(spec, state, custody_response) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_key_reveal.py similarity index 93% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py rename to tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_key_reveal.py index 64e207062..a825d7c75 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_key_reveal.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_key_reveal.py @@ -1,6 +1,6 @@ from eth2spec.test.helpers.custody import 
get_valid_custody_key_reveal from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, with_phases, spec_state_test, expect_assertion_error, @@ -39,7 +39,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru yield 'post', state -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_success(spec, state): @@ -49,7 +49,7 @@ def test_success(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_reveal_too_early(spec, state): @@ -58,7 +58,7 @@ def test_reveal_too_early(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_wrong_period(spec, state): @@ -67,7 +67,7 @@ def test_wrong_period(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_late_reveal(spec, state): @@ -77,7 +77,7 @@ def test_late_reveal(spec, state): yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_double_reveal(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py similarity index 96% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py rename to tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py index 2765d2980..12ac708aa 100644 --- 
a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_custody_slashing.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py @@ -10,7 +10,7 @@ from eth2spec.utils.ssz.ssz_typing import ByteList from eth2spec.test.helpers.state import get_balance, transition_to from eth2spec.test.context import ( MINIMAL, - PROOF_OF_CUSTODY, + CUSTODY_GAME, with_phases, spec_state_test, expect_assertion_error, @@ -112,7 +112,7 @@ def run_standard_custody_slashing_test(spec, yield from run_custody_slashing_processing(spec, state, slashing, valid=valid, correct=correct) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -120,7 +120,7 @@ def test_custody_slashing(spec, state): yield from run_standard_custody_slashing_test(spec, state) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -128,7 +128,7 @@ def test_incorrect_custody_slashing(spec, state): yield from run_standard_custody_slashing_test(spec, state, correct=False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -136,7 +136,7 @@ def test_multiple_epochs_custody(spec, state): yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 3) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines @with_configs([MINIMAL], reason="too slow") @@ -144,7 +144,7 @@ def test_many_epochs_custody(spec, state): yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 5) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @disable_process_reveal_deadlines 
@with_configs([MINIMAL], reason="too slow") diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_early_derived_secret_reveal.py similarity index 93% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py rename to tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_early_derived_secret_reveal.py index fa8b045ac..d3da6c580 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/block_processing/test_process_early_derived_secret_reveal.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_early_derived_secret_reveal.py @@ -1,7 +1,7 @@ from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal from eth2spec.test.helpers.state import next_epoch_via_block, get_balance from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, with_phases, spec_state_test, expect_assertion_error, @@ -41,7 +41,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v yield 'post', state -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_success(spec, state): @@ -50,7 +50,7 @@ def test_success(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_reveal_from_current_epoch(spec, state): @@ -59,7 +59,7 @@ def test_reveal_from_current_epoch(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_reveal_from_past_epoch(spec, state): @@ -69,7 +69,7 @@ def test_reveal_from_past_epoch(spec, state): yield from 
run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_reveal_with_custody_padding(spec, state): @@ -81,7 +81,7 @@ def test_reveal_with_custody_padding(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @always_bls def test_reveal_with_custody_padding_minus_one(spec, state): @@ -93,7 +93,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_double_reveal(spec, state): @@ -114,7 +114,7 @@ def test_double_reveal(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_revealer_is_slashed(spec, state): @@ -124,7 +124,7 @@ def test_revealer_is_slashed(spec, state): yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @never_bls def test_far_future_epoch(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/__init__.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/__init__.py rename to tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py 
similarity index 93% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py rename to tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py index 516a761d1..f0e353cec 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_challenge_deadlines.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py @@ -7,7 +7,7 @@ from eth2spec.test.helpers.attestations import ( ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, MINIMAL, spec_state_test, with_phases, @@ -16,7 +16,7 @@ from eth2spec.test.context import ( from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with -from eth2spec.test.proof_of_custody.block_processing.test_process_chunk_challenge import ( +from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import ( run_chunk_challenge_processing, ) @@ -25,7 +25,7 @@ def run_process_challenge_deadlines(spec, state): yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines') -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @with_configs([MINIMAL], reason="too slow") def test_validator_slashed_after_chunk_challenge(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py similarity index 95% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py rename to 
tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py index 4522c121a..acc076bce 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_custody_final_updates.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py @@ -1,5 +1,5 @@ from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, ) from eth2spec.test.helpers.custody import ( get_valid_chunk_challenge, @@ -18,11 +18,11 @@ from eth2spec.test.context import ( from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with -from eth2spec.test.proof_of_custody.block_processing.test_process_chunk_challenge import ( +from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import ( run_chunk_challenge_processing, run_custody_chunk_response_processing, ) -from eth2spec.test.proof_of_custody.block_processing.test_process_custody_key_reveal import ( +from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import ( run_custody_key_reveal_processing, ) @@ -31,7 +31,7 @@ def run_process_custody_final_updates(spec, state): yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates') -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_delay(spec, state): transition_to_valid_shard_slot(spec, state) @@ -44,7 +44,7 @@ def test_validator_withdrawal_delay(spec, state): assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_reenable_after_custody_reveal(spec, state): transition_to_valid_shard_slot(spec, state) @@ -69,7 +69,7 @@ def test_validator_withdrawal_reenable_after_custody_reveal(spec, state): assert 
state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state): transition_to_valid_shard_slot(spec, state) @@ -118,7 +118,7 @@ def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state): assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state): transition_to_valid_shard_slot(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_reveal_deadlines.py similarity index 91% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py rename to tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_reveal_deadlines.py index 43078cd45..c82ebd8bb 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/epoch_processing/test_process_reveal_deadlines.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_reveal_deadlines.py @@ -3,14 +3,14 @@ from eth2spec.test.helpers.custody import ( ) from eth2spec.test.helpers.state import transition_to from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, MINIMAL, with_phases, with_configs, spec_state_test, ) from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with -from eth2spec.test.proof_of_custody.block_processing.test_process_custody_key_reveal import ( +from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import ( run_custody_key_reveal_processing, ) @@ -19,7 +19,7 @@ def run_process_challenge_deadlines(spec, state): yield from run_epoch_processing_with(spec, state, 
'process_challenge_deadlines') -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @with_configs([MINIMAL], reason="too slow") def test_validator_slashed_after_reveal_deadline(spec, state): @@ -39,7 +39,7 @@ def test_validator_slashed_after_reveal_deadline(spec, state): assert state.validators[0].slashed == 1 -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test @with_configs([MINIMAL], reason="too slow") def test_validator_not_slashed_after_reveal(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/__init__.py b/tests/core/pyspec/eth2spec/test/custody_game/sanity/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/__init__.py rename to tests/core/pyspec/eth2spec/test/custody_game/sanity/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py similarity index 97% rename from tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py rename to tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py index b9afbc0eb..88dd54bda 100644 --- a/tests/core/pyspec/eth2spec/test/proof_of_custody/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py @@ -1,7 +1,7 @@ from typing import Dict, Sequence from eth2spec.test.context import ( - PROOF_OF_CUSTODY, + CUSTODY_GAME, MINIMAL, with_phases, spec_state_test, @@ -46,7 +46,7 @@ def run_beacon_block(spec, state, block, valid=True): # -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test def test_with_shard_transition_with_custody_challenge_and_response(spec, state): transition_to_valid_shard_slot(spec, state) @@ -79,7 +79,7 @@ def test_with_shard_transition_with_custody_challenge_and_response(spec, state): yield from run_beacon_block(spec, state, block) -@with_phases([PROOF_OF_CUSTODY]) 
+@with_phases([CUSTODY_GAME]) @spec_state_test @with_configs([MINIMAL]) def test_custody_key_reveal(spec, state): @@ -93,7 +93,7 @@ def test_custody_key_reveal(spec, state): yield from run_beacon_block(spec, state, block) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test def test_early_derived_secret_reveal(spec, state): transition_to_valid_shard_slot(spec, state) @@ -104,7 +104,7 @@ def test_early_derived_secret_reveal(spec, state): yield from run_beacon_block(spec, state, block) -@with_phases([PROOF_OF_CUSTODY]) +@with_phases([CUSTODY_GAME]) @spec_state_test def test_custody_slashing(spec, state): transition_to_valid_shard_slot(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py index e2dfcfe5b..676d8cb6d 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py @@ -30,7 +30,7 @@ def get_process_calls(spec): # Merge 'process_application_payload': lambda state, block: spec.process_application_payload(state, block.body), - # Proof of custody + # Custody Game 'process_custody_game_operations': lambda state, block: spec.process_custody_game_operations(state, block.body), } diff --git a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py index 4d508d0ce..3c47c4895 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py @@ -11,8 +11,8 @@ def get_process_calls(spec): 'process_justification_and_finalization', 'process_rewards_and_penalties', 'process_registry_updates', - 'process_reveal_deadlines', # proof of custody - 'process_challenge_deadlines', # proof of custody + 'process_reveal_deadlines', # custody game + 'process_challenge_deadlines', # custody game 'process_slashings', 'process_pending_header.', # sharding 
'charge_confirmed_header_fees', # sharding From 64fc912ce7a33b84854c95a2857284362b4bbab6 Mon Sep 17 00:00:00 2001 From: Ben Edgington Date: Sat, 27 Mar 2021 16:13:46 +0000 Subject: [PATCH 116/127] Make if condition more precise --- specs/altair/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 1ce78ad7c..998958643 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -461,7 +461,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: # Participation flag indices participation_flag_indices = [] - if is_matching_head and is_matching_target and state.slot <= data.slot + MIN_ATTESTATION_INCLUSION_DELAY: + if is_matching_head and is_matching_target and state.slot == data.slot + MIN_ATTESTATION_INCLUSION_DELAY: participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX) if is_matching_source and state.slot <= data.slot + integer_squareroot(SLOTS_PER_EPOCH): participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX) From b23b16c7b74faa312b02811b80fe9b1fdfcdae2f Mon Sep 17 00:00:00 2001 From: Taneli Hukkinen Date: Wed, 17 Mar 2021 02:15:08 +0200 Subject: [PATCH 117/127] Fix list indentation --- specs/phase0/p2p-interface.md | 8 ++++---- specs/phase0/weak-subjectivity.md | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 28b166651..ccd8cc218 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -1298,10 +1298,10 @@ Requests are segregated by protocol ID to: 6. Parallelise RFCs (or Eth2 EIPs). By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronization point to version the general top-level protocol. - 1. 
This has the benefit that clients can explicitly choose which RFCs to deploy - without buying into all other RFCs that may be included in that top-level version. - 2. Affording this level of granularity with a top-level protocol would imply creating as many variants - (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2). + 1. This has the benefit that clients can explicitly choose which RFCs to deploy + without buying into all other RFCs that may be included in that top-level version. + 2. Affording this level of granularity with a top-level protocol would imply creating as many variants + (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2). 7. Allow us to simplify the payload of requests. Request-id’s and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework. diff --git a/specs/phase0/weak-subjectivity.md b/specs/phase0/weak-subjectivity.md index 4137efade..fd1b3cc28 100644 --- a/specs/phase0/weak-subjectivity.md +++ b/specs/phase0/weak-subjectivity.md @@ -141,12 +141,12 @@ Clients should allow users to input a Weak Subjectivity Checkpoint at startup, a ### Weak Subjectivity Sync Procedure 1. Input a Weak Subjectivity Checkpoint as a CLI parameter in `block_root:epoch_number` format, - where `block_root` (an "0x" prefixed 32-byte hex string) and `epoch_number` (an integer) represent a valid `Checkpoint`. - Example of the format: + where `block_root` (an "0x" prefixed 32-byte hex string) and `epoch_number` (an integer) represent a valid `Checkpoint`. + Example of the format: -``` -0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544 -``` + ``` + 0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544 + ``` 2. 
Check the weak subjectivity requirements: - *IF* `epoch_number > store.finalized_checkpoint.epoch`, From b02aed93a3ea67b2cc2e108590b21bc2823beca7 Mon Sep 17 00:00:00 2001 From: Taneli Hukkinen Date: Mon, 29 Mar 2021 01:17:29 +0300 Subject: [PATCH 118/127] Fix another list's indent --- specs/altair/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 25897b97e..ac73b2509 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -412,7 +412,7 @@ Subnet assignments are known `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs in advanc ENR advertisement is indicated by setting the appropriate bit(s) of the bitfield found under the `syncnets` key in the ENR corresponding to the derived `subnet_id`(s). Any bits modified for the sync committee responsibilities are unset in the ENR after any validators have left the sync committee. -*Note*: The first sync committee from phase 0 to the Altair fork will not be known until the fork happens which implies subnet assignments are not known until then. + *Note*: The first sync committee from phase 0 to the Altair fork will not be known until the fork happens which implies subnet assignments are not known until then. Early sync committee members should listen for topic subscriptions from peers and employ discovery via the ENR advertisements near the fork boundary to form initial subnets Some early sync committee rewards may be missed while the initial subnets form. 
From f73aa2f98dca6438f00f28b30e44ccbc9bbef3b6 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Sun, 28 Mar 2021 17:06:12 -0700 Subject: [PATCH 119/127] Fix spelling: negotiatied -> negotiated --- specs/altair/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index c087fd49a..0d5af0a71 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -204,7 +204,7 @@ In advance of the fork, implementations can opt in to both run the v1 and v2 for This is non-breaking, and is recommended as soon as the fork specification is stable. The v1 variants will be deprecated, and implementations should use v2 when available -(as negotiatied with peers via LibP2P multistream-select). +(as negotiated with peers via LibP2P multistream-select). The v1 method MAY be unregistered at the fork boundary. In the event of a request on v1 for an Altair specific payload, From d28cac0e8fc1f7e761e365f8c2a8c408015dda47 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 29 Mar 2021 15:38:43 +0200 Subject: [PATCH 120/127] clean up test phase 1 testgen references --- .../pyspec/eth2spec/config/config_util.py | 4 ++-- .../pyspec/eth2spec/test/helpers/state.py | 4 ++-- .../test_process_rewards_and_penalties.py | 6 ----- tests/formats/README.md | 4 ++-- tests/generators/README.md | 10 ++------ tests/generators/epoch_processing/main.py | 18 +++++++-------- tests/generators/finality/main.py | 10 +++----- tests/generators/fork_choice/main.py | 9 +++----- tests/generators/genesis/main.py | 7 ++---- tests/generators/operations/main.py | 23 +++++++++---------- tests/generators/rewards/main.py | 9 +++----- tests/generators/sanity/main.py | 10 ++------ tests/generators/ssz_static/main.py | 6 +---- 13 files changed, 42 insertions(+), 78 deletions(-) diff --git a/tests/core/pyspec/eth2spec/config/config_util.py b/tests/core/pyspec/eth2spec/config/config_util.py index 3f6764e98..ee2007600 100644 
--- a/tests/core/pyspec/eth2spec/config/config_util.py +++ b/tests/core/pyspec/eth2spec/config/config_util.py @@ -17,8 +17,8 @@ def apply_constants_config(spec_globals: Dict[str, Any], warn_if_unknown: bool = # Keep the same type as the default value indicates (which may be an SSZ basic type subclass, e.g. 'Gwei') spec_globals[k] = spec_globals[k].__class__(v) else: - # Note: Phase 0 spec will not know the phase 1 config values. - # Yet, during debugging you can enable explicit warnings. + # Note: The phase 0 spec will not warn if Altair or later config values are applied. + # During debugging you can enable explicit warnings. if warn_if_unknown: print(f"WARNING: unknown config key: '{k}' with value: '{v}'") diff --git a/tests/core/pyspec/eth2spec/test/helpers/state.py b/tests/core/pyspec/eth2spec/test/helpers/state.py index f53d8f711..8980053f5 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/state.py +++ b/tests/core/pyspec/eth2spec/test/helpers/state.py @@ -42,9 +42,9 @@ def transition_to_slot_via_block(spec, state, slot): def transition_to_valid_shard_slot(spec, state): """ - Transition to slot `spec.PHASE_1_FORK_SLOT + 1` and fork at `spec.PHASE_1_FORK_SLOT`. + Transition to slot `spec.SHARDING_FORK_SLOT + 1` and fork at `spec.SHARDING_FORK_SLOT`. 
""" - transition_to(spec, state, spec.PHASE_1_FORK_SLOT) + transition_to(spec, state, spec.SHARDING_FORK_SLOT) next_slot(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py index cebe65e03..9abcff57e 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_rewards_and_penalties.py @@ -292,8 +292,6 @@ def test_duplicate_attestation(spec, state): assert single_state.balances[index] == dup_state.balances[index] -# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged -# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test @with_phases([PHASE0]) @spec_state_test def test_duplicate_participants_different_attestation_1(spec, state): @@ -334,8 +332,6 @@ def test_duplicate_participants_different_attestation_1(spec, state): assert single_correct_state.balances[index] == dup_state.balances[index] -# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged -# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test @with_phases([PHASE0]) @spec_state_test def test_duplicate_participants_different_attestation_2(spec, state): @@ -377,8 +373,6 @@ def test_duplicate_participants_different_attestation_2(spec, state): assert single_correct_state.balances[index] == dup_state.balances[index] -# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged -# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test @with_phases([PHASE0]) @spec_state_test def 
test_duplicate_participants_different_attestation_3(spec, state): diff --git a/tests/formats/README.md b/tests/formats/README.md index 7808538ad..8f817220e 100644 --- a/tests/formats/README.md +++ b/tests/formats/README.md @@ -108,8 +108,8 @@ As a top level dir, it is not duplicated, and the used config can be copied righ ### `/` -This would be: "phase0", "transferparty", "phase1", etc. Each introduces new tests, but does not copy tests that do not change. -If you like to test phase 1, you run phase 0 tests, with the configuration that includes phase 1 changes. Out of scope for now however. +This would be: "phase0", "altair", etc. Each introduces new tests, and modifies any tests that change: +some tests of earlier forks repeat with updated state data. ### `/` diff --git a/tests/generators/README.md b/tests/generators/README.md index f629c8b74..5e12e6bfe 100644 --- a/tests/generators/README.md +++ b/tests/generators/README.md @@ -164,13 +164,12 @@ Another example, to generate tests from pytests: ```python from eth2spec.phase0 import spec as spec_phase0 from eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": @@ -181,15 +180,10 @@ if __name__ == "__main__": altair_mods = {**{key: 'eth2spec.test.altair.sanity.test_' + key for key in [ 'blocks', ]}, **phase_0_mods} # also run the previous phase 0 tests - phase_1_mods = {**{key: 'eth2spec.test.phase1.sanity.test_' + key for key in [ - 'blocks', # more phase 1 specific block tests - 'shard_blocks', - ]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec) all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } 
run_state_test_generators(runner_name="sanity", specs=specs, all_mods=all_mods) diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index 5207330a7..e3caf69fa 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -1,11 +1,10 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators from eth2spec.phase0 import spec as spec_phase0 from eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": @@ -27,16 +26,17 @@ if __name__ == "__main__": ]}, **phase_0_mods, } # also run the previous phase 0 tests - phase_1_mods = {**{key: 'eth2spec.test.phase1.epoch_processing.test_process_' + key for key in [ - 'reveal_deadlines', - 'challenge_deadlines', - 'custody_final_updates', - ]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec) + + # TODO Custody Game testgen is disabled for now + # custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [ + # 'reveal_deadlines', + # 'challenge_deadlines', + # 'custody_final_updates', + # ]}, **phase_0_mods} # also run the previous phase 0 tests (but against custody game spec) all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } run_state_test_generators(runner_name="epoch_processing", specs=specs, all_mods=all_mods) diff --git a/tests/generators/finality/main.py b/tests/generators/finality/main.py index 5598028a2..29ab46c39 100644 --- a/tests/generators/finality/main.py +++ b/tests/generators/finality/main.py @@ -1,23 +1,19 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators from eth2spec.phase0 import spec as spec_phase0 from 
eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'} - # No additional altair or phase 1 specific finality tests, yet. - altair_mods = phase_0_mods - phase_1_mods = phase_0_mods + altair_mods = phase_0_mods # No additional altair specific finality tests all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } run_state_test_generators(runner_name="finality", specs=specs, all_mods=all_mods) diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 492e596f4..f09bbcc0a 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -1,25 +1,22 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators from eth2spec.phase0 import spec as spec_phase0 from eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ 'get_head', ]} - # No additional Altair or phase 1 specific finality tests, yet. + # No additional Altair specific finality tests, yet. 
altair_mods = phase_0_mods - phase_1_mods = phase_0_mods all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } run_state_test_generators(runner_name="fork_choice", specs=specs, all_mods=all_mods) diff --git a/tests/generators/genesis/main.py b/tests/generators/genesis/main.py index 4fede298f..362677c24 100644 --- a/tests/generators/genesis/main.py +++ b/tests/generators/genesis/main.py @@ -1,11 +1,10 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators from eth2spec.phase0 import spec as spec_phase0 from eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": @@ -14,11 +13,9 @@ if __name__ == "__main__": 'validity', ]} altair_mods = phase_0_mods - phase_1_mods = phase_0_mods all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } run_state_test_generators(runner_name="genesis", specs=specs, all_mods=all_mods) diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py index f5c181710..f00c4f894 100644 --- a/tests/generators/operations/main.py +++ b/tests/generators/operations/main.py @@ -1,11 +1,10 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators from eth2spec.phase0 import spec as spec_phase0 from eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": @@ -23,19 +22,19 @@ if __name__ == "__main__": ]}, **phase_0_mods, } # also run the previous phase 0 tests - phase_1_mods = {**{key: 
'eth2spec.test.phase1.block_processing.test_process_' + key for key in [ - 'attestation', - 'chunk_challenge', - 'custody_key_reveal', - 'custody_slashing', - 'early_derived_secret_reveal', - 'shard_transition', - ]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec) + + # TODO Custody Game testgen is disabled for now + # custody_game_mods = {**{key: 'eth2spec.test.custody_game.block_processing.test_process_' + key for key in [ + # 'attestation', + # 'chunk_challenge', + # 'custody_key_reveal', + # 'custody_slashing', + # 'early_derived_secret_reveal', + # ]}, **phase_0_mods} # also run the previous phase 0 tests (but against custody game spec) all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } run_state_test_generators(runner_name="operations", specs=specs, all_mods=all_mods) diff --git a/tests/generators/rewards/main.py b/tests/generators/rewards/main.py index 3a7e8f63d..d8115f4cb 100644 --- a/tests/generators/rewards/main.py +++ b/tests/generators/rewards/main.py @@ -1,11 +1,10 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators from eth2spec.phase0 import spec as spec_phase0 from eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": @@ -14,14 +13,12 @@ if __name__ == "__main__": 'leak', 'random', ]} - # No additional altair or phase 1 specific rewards tests, yet. + # No additional altair specific rewards tests, yet. 
altair_mods = phase_0_mods - phase_1_mods = phase_0_mods all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } run_state_test_generators(runner_name="rewards", specs=specs, all_mods=all_mods) diff --git a/tests/generators/sanity/main.py b/tests/generators/sanity/main.py index a0ec2aaae..90f9a00df 100644 --- a/tests/generators/sanity/main.py +++ b/tests/generators/sanity/main.py @@ -1,12 +1,11 @@ from eth2spec.phase0 import spec as spec_phase0 from eth2spec.altair import spec as spec_altair -from eth2spec.phase1 import spec as spec_phase1 -from eth2spec.test.context import PHASE0, PHASE1, ALTAIR +from eth2spec.test.context import PHASE0, ALTAIR from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -specs = (spec_phase0, spec_altair, spec_phase1) +specs = (spec_phase0, spec_altair) if __name__ == "__main__": @@ -17,15 +16,10 @@ if __name__ == "__main__": altair_mods = {**{key: 'eth2spec.test.altair.sanity.test_' + key for key in [ 'blocks', ]}, **phase_0_mods} # also run the previous phase 0 tests - phase_1_mods = {**{key: 'eth2spec.test.phase1.sanity.test_' + key for key in [ - 'blocks', # more phase 1 specific block tests - 'shard_blocks', - ]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec) all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, - PHASE1: phase_1_mods, } run_state_test_generators(runner_name="sanity", specs=specs, all_mods=all_mods) diff --git a/tests/generators/ssz_static/main.py b/tests/generators/ssz_static/main.py index e06e3edc8..77a88a8cd 100644 --- a/tests/generators/ssz_static/main.py +++ b/tests/generators/ssz_static/main.py @@ -8,9 +8,8 @@ from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing from eth2spec.debug import random_value, encode from eth2spec.config import config_util from eth2spec.phase0 import spec as spec_phase0 -from eth2spec.phase1 import spec as spec_phase1 from eth2spec.altair import spec as spec_altair -from 
eth2spec.test.context import PHASE1, ALTAIR, TESTGEN_FORKS, MINIMAL, MAINNET +from eth2spec.test.context import ALTAIR, TESTGEN_FORKS, MINIMAL, MAINNET from eth2spec.utils.ssz.ssz_typing import Container from eth2spec.utils.ssz.ssz_impl import ( hash_tree_root, @@ -64,15 +63,12 @@ def create_provider(fork_name, config_name: str, seed: int, mode: random_value.R # Apply changes to presets, this affects some of the vector types. config_util.prepare_config(configs_path, config_name) reload(spec_phase0) - reload(spec_phase1) reload(spec_altair) return config_name def cases_fn() -> Iterable[gen_typing.TestCase]: count = cases_if_random if chaos or mode.is_changing() else 1 spec = spec_phase0 - if fork_name == PHASE1: - spec = spec_phase1 if fork_name == ALTAIR: spec = spec_altair From a51c5ee4c2f815e3902c370da7a6e7e931d918fe Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 29 Mar 2021 07:51:49 -0600 Subject: [PATCH 121/127] Apply suggestions from code review Co-authored-by: Hsiao-Wei Wang --- specs/altair/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 173a1e713..5f592c61a 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -354,7 +354,7 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee. An aggregator needs to find the index in the sync committee (as returned by `get_sync_committee_indices`) for a given validator referenced by `sync_committee_signature.validator_index` and map the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is the one set in the `Bitvector`. 
-For example, a validator with index `2044` could be at index `135` in the current sync committee. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution. +For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution. Also note that a validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`. ###### Signature From eca6bd7d622a0cfb7343bff742da046ed25b3825 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 29 Mar 2021 11:56:43 -0700 Subject: [PATCH 122/127] Fix merge beacon chain spec's toc --- specs/merge/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index ce6df0dd9..55994188a 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -11,6 +11,7 @@ - [Introduction](#introduction) +- [Custom types](#custom-types) - [Constants](#constants) - [Transition](#transition) - [Execution](#execution) @@ -19,7 +20,6 @@ - [`BeaconBlockBody`](#beaconblockbody) - [`BeaconState`](#beaconstate) - [New containers](#new-containers) - - [`Transaction`](#transaction) - [`ApplicationPayload`](#applicationpayload) - [Helper functions](#helper-functions) - [Misc](#misc) From 430627f2907eef0f3f7e1ee26ac7a7b5c45c66cb Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Tue, 30 Mar 2021 01:33:17 +0200 Subject: [PATCH 123/127] Apply suggestions from @djrtwo Co-authored-by: Danny Ryan --- configs/mainnet/sharding.yaml | 2 +- configs/minimal/sharding.yaml | 4 ++-- specs/custody_game/validator.md | 14 +++++++------- specs/das/das-core.md | 2 +- specs/das/fork-choice.md | 6 +++--- specs/das/p2p-interface.md | 22 +++++++++++----------- specs/sharding/beacon-chain.md | 16 
+++++++--------- 7 files changed, 32 insertions(+), 34 deletions(-) diff --git a/configs/mainnet/sharding.yaml b/configs/mainnet/sharding.yaml index 099a71442..3051ab024 100644 --- a/configs/mainnet/sharding.yaml +++ b/configs/mainnet/sharding.yaml @@ -40,4 +40,4 @@ SHARD_COMMITTEE_PERIOD: 256 # Signature domains # --------------------------------------------------------------- DOMAIN_SHARD_PROPOSAL: 0x80000000 -DOMAIN_SHARD_COMMITTEE: 0x81000000 \ No newline at end of file +DOMAIN_SHARD_COMMITTEE: 0x81000000 diff --git a/configs/minimal/sharding.yaml b/configs/minimal/sharding.yaml index 1b73a5769..b9497cb53 100644 --- a/configs/minimal/sharding.yaml +++ b/configs/minimal/sharding.yaml @@ -2,7 +2,7 @@ # Fork # --------------------------------------------------------------- -SHARDING_FORK_VERSION: 0x03000000 +SHARDING_FORK_VERSION: 0x03000001 # TBD SHARDING_FORK_SLOT: 0 @@ -41,4 +41,4 @@ SHARD_COMMITTEE_PERIOD: 256 # --------------------------------------------------------------- DOMAIN_SHARD_PROPOSAL: 0x80000000 DOMAIN_SHARD_COMMITTEE: 0x81000000 -DOMAIN_LIGHT_CLIENT: 0x82000000 \ No newline at end of file +DOMAIN_LIGHT_CLIENT: 0x82000000 diff --git a/specs/custody_game/validator.md b/specs/custody_game/validator.md index f4f497d7b..b4045fd4a 100644 --- a/specs/custody_game/validator.md +++ b/specs/custody_game/validator.md @@ -1,8 +1,8 @@ -# Ethereum 2.0 Phase 1 -- Honest Validator +# Ethereum 2.0 Custody Game -- Honest Validator **Notice**: This document is a work-in-progress for researchers and implementers. -This is an accompanying document to [Ethereum 2.0 Phase 1](./), which describes the expected actions of a "validator" -participating in the Ethereum 2.0 Phase 1 protocol. +This is an accompanying document to the [Ethereum 2.0 Custody Game](./), which describes the expected actions of a "validator" +participating in the Ethereum 2.0 Custody Game. ## Table of contents @@ -29,14 +29,14 @@ participating in the Ethereum 2.0 Phase 1 protocol. 
## Prerequisites -This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden. +This document is an extension of the [Sharding -- Validator](../sharding/validator.md). All behaviors and definitions defined in the Sharding doc carry over unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) -docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use them as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the [Custody Game -- The Beacon Chain](./beacon-chain.md) +docs are requisite for this document and used throughout. Please see the Custody Game docs before continuing and use them as a reference throughout. ## Becoming a validator -Becoming a validator in Phase 1 is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details. +Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details. ## Beacon chain validator assignments diff --git a/specs/das/das-core.md b/specs/das/das-core.md index c6cbc09e3..ee73a6150 100644 --- a/specs/das/das-core.md +++ b/specs/das/das-core.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Data Availability Sampling Core +# Ethereum 2.0 Data Availability Sampling -- Core **Notice**: This document is a work-in-progress for researchers and implementers. 
diff --git a/specs/das/fork-choice.md b/specs/das/fork-choice.md index ae105c8ef..329c033a7 100644 --- a/specs/das/fork-choice.md +++ b/specs/das/fork-choice.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Data Availability Sampling Fork Choice +# Ethereum 2.0 Data Availability Sampling -- Fork Choice **Notice**: This document is a work-in-progress for researchers and implementers. @@ -17,9 +17,9 @@ ## Introduction -This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1. The only change that we add from phase 0 is that we add a concept of "data dependencies"; +This document is the beacon chain fork choice spec for Ethereum 2.0 Data Availability Sampling. The only change that we add from phase 0 is that we add a concept of "data dependencies"; a block is only eligible for consideration in the fork choice after a data availability test has been successfully completed for all dependencies. -The "root" of a shard block for data dependency purposes is considered to be a DataCommitment object, which is a pair of a Kate commitment and a length. +The "root" of a shard block for data dependency purposes is considered to be a `DataCommitment` object, which is a pair of a Kate commitment and a length. ## Dependency calculation diff --git a/specs/das/p2p-interface.md b/specs/das/p2p-interface.md index 0c8c211d4..fcc7616da 100644 --- a/specs/das/p2p-interface.md +++ b/specs/das/p2p-interface.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Data Availability Sampling Network specification +# Ethereum 2.0 Data Availability Sampling -- Network specification **Notice**: This document is a work-in-progress for researchers and implementers. @@ -68,7 +68,7 @@ At full operation, the network has one proposer, per shard, per slot. In the push-model, there are: - *Vertical subnets*: Sinks can subscribe to indices of samples: there is a sample to subnet mapping. -- *Horizontal subnets*: Sources need to distribute samples to all vertical networks: they participate in a fanout layer. 
+- *Horizontal subnets*: Sources need to distribute samples to all vertical networks: they participate in a fan-out layer. ### Horizontal subnets @@ -84,7 +84,7 @@ it may publish to all its peers on the subnet, instead of just those in its mesh #### Horizontal propagation -Peers on the horizontal subnet are expected to at least perform regular propagation of shard blocks, like how do would participate in any other topic. +Peers on the horizontal subnet are expected to at least perform regular propagation of shard blocks, like participation in any other topic. *Although this may be sufficient for testnets, expect parameter changes in the spec here.* @@ -137,7 +137,7 @@ Backbone subscription work is outlined in the [DAS participation spec](sampling. #### Quick Rotation: Sampling A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS participation spec](sampling.md#quick-rotation-sampling). -If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore, and if necessary in the DHT, for peers in the topic backbone. +If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore and, if necessary, in the DHT for peers in the topic backbone. ## DAS in the Gossip domain: Push @@ -148,13 +148,13 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface. |----------------------------------|---------------------------| | `das_sample_{subnet_index}` | `DASSample` | -Also see the [Phase1 general networking spec](./p2p-phase1.md) for important topics such as that of the shard-blobs and shard-headers. +Also see the [Sharding general networking spec](../sharding/p2p-interface.md) for important topics such as that of the shard-blobs and shard-headers. 
#### Horizontal subnets: `shard_blob_{shard}` -Extending the regular `shard_blob_{shard}` as [defined in the Phase1 networking specification](./p2p-phase1.md#shard-blobs-shard_blob_shard) +Extending the regular `shard_blob_{shard}` as [defined in the Sharding networking specification](../sharding/p2p-interface.md#shard-blobs-shard_blob_shard) -If participating in DAS, upon receiving a `signed_blob` for the first time, with a `slot` not older than `MAX_RESAMPLE_TIME`, +If participating in DAS, upon receiving a `signed_blob` for the first time with a `slot` not older than `MAX_RESAMPLE_TIME`, a subscriber of a `shard_blob_{shard}` SHOULD reconstruct the samples and publish them to vertical subnets. Take `blob = signed_blob.blob`: 1. Extend the data: `extended_data = extend_data(blob.data)` @@ -171,20 +171,20 @@ against the commitment to blob polynomial, specific to that `(shard, slot)` key. The following validations MUST pass before forwarding the `sample` on the vertical subnet. - _[IGNORE]_ The commitment for the (`sample.shard`, `sample.slot`, `sample.index`) tuple must be known. - If not known, the client MAY queue the sample, if it passes formatting conditions. + If not known, the client MAY queue the sample if it passes formatting conditions. - _[REJECT]_ `sample.shard`, `sample.slot` and `sample.index` are hashed into a `sbunet_index` (TODO: define hash) which MUST match the topic `{subnet_index}` parameter. - _[REJECT]_ `sample.shard` must be within valid range: `0 <= sample.shard < get_active_shard_count(state, compute_epoch_at_slot(sample.slot))`. - _[REJECT]_ `sample.index` must be within valid range: `0 <= sample.index < sample_count`, where: - `sample_count = (points_count + POINTS_PER_SAMPLE - 1) // POINTS_PER_SAMPLE` - `points_count` is the length as claimed along with the commitment, which must be smaller than `MAX_SAMPLES_PER_BLOCK`. - _[IGNORE]_ The `sample` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- - i.e. 
validate that `sample.slot <= current_slot`. A client MAY queue future samples for processing at the appropriate slot, if it passed formatting conditions. + i.e. validate that `sample.slot <= current_slot`. A client MAY queue future samples for processing at the appropriate slot if it passed formatting conditions. - _[IGNORE]_ This is the first received sample with the (`sample.shard`, `sample.slot`, `sample.index`) key tuple. - _[REJECT]_ As already limited by the SSZ list-limit, it is important the sample data is well-formatted and not too large. - _[REJECT]_ The `sample.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. - _[REJECT]_ The `sample.proof` MUST be valid: `verify_sample(sample, sample_count, commitment)` -Upon receiving a valid sample, it SHOULD be retained for a buffer period, if the local node is part of the backbone that covers this sample. +Upon receiving a valid sample, it SHOULD be retained for a buffer period if the local node is part of the backbone that covers this sample. This is to serve other peers that may have missed it. @@ -194,7 +194,7 @@ To pull samples from nodes, in case of network instability when samples are unav This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../phase0/p2p-interface.md). -Note that the Phase1 DAS networking uses a different protocol prefix: `/eth2/das/req` +Note that DAS networking uses a different protocol prefix: `/eth2/das/req` The result codes are extended with: - 3: **ResourceUnavailable** -- when the request was valid but cannot be served at this point in time. 
diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 21a938663..673cd7744 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -80,7 +80,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | Notes | | - | - | - | | `PRIMITIVE_ROOT_OF_UNITY` | `5` | Primitive root of unity of the BLS12_381 (inner) modulus | -| `DATA_AVAILABILITY_INVERSE_CODING_RATE` | `2**1` (=2) | Factor by which samples are extended for data availability encoding | +| `DATA_AVAILABILITY_INVERSE_CODING_RATE` | `2**1` (= 2) | Factor by which samples are extended for data availability encoding | | `POINTS_PER_SAMPLE` | `uint64(2**3)` (= 8) | 31 * 8 = 248 bytes | | `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) | @@ -90,12 +90,11 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | Notes | | - | - | - | -| `MAX_SHARDS` | `uint64(2**10)` (= 1024) | Theoretical max shard count (used to determine data structure sizes) | +| `MAX_SHARDS` | `uint64(2**10)` (= 1,024) | Theoretical max shard count (used to determine data structure sizes) | | `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count | | `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | - ### Shard block configs | Name | Value | Notes | @@ -147,7 +146,7 @@ class AttestationData(Container): source: Checkpoint target: Checkpoint # Shard header root - shard_header_root: Root + shard_header_root: Root # [New in Sharding] ``` ### `BeaconBlockBody` @@ -419,8 +418,8 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) process_randao(state, block.body) process_eth1_data(state, block.body) - process_operations(state, block.body) # [Modified] - 
process_application_payload(state, block.body) # [Part of the Merge] + process_operations(state, block.body) # [Modified in Sharding] + process_application_payload(state, block.body) # [New in Merge] ``` #### Operations @@ -559,7 +558,7 @@ def process_epoch(state: BeaconState) -> None: # Sharding process_pending_headers(state) - charge_confirmed_header_fees(state) + process_confirmed_header_fees(state) reset_pending_headers(state) # Final updates @@ -577,7 +576,6 @@ def process_epoch(state: BeaconState) -> None: #### Pending headers ```python - def process_pending_headers(state: BeaconState) -> None: # Pending header processing applies to the previous epoch. # Skip if `GENESIS_EPOCH` because no prior epoch to process. @@ -668,7 +666,7 @@ def reset_pending_headers(state: BeaconState) -> None: next_epoch = get_current_epoch(state) + 1 next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch) for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_IN_EPOCH): - for index in range(get_committee_count_per_slot(next_epoch) + for index in range(get_committee_count_per_slot(next_epoch): shard = compute_shard_from_committee_index(state, slot, index) committee_length = len(get_beacon_committee(state, slot, shard)) state.current_epoch_pending_shard_headers.append(PendingShardHeader( From 9420c29899736784b3d12b71ff50c8e1aea03ace Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 30 Mar 2021 01:39:01 +0200 Subject: [PATCH 124/127] custody game doc naming updates --- README.md | 2 +- specs/custody_game/{custody-game.md => beacon-chain.md} | 2 +- specs/custody_game/validator.md | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) rename specs/custody_game/{custody-game.md => beacon-chain.md} (99%) diff --git a/README.md b/README.md index 8cb545cfa..f507019fd 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Sharding follows the merge, and is divided into three parts: * [Beacon Chain changes](specs/sharding/beacon-chain.md) * [P2P Network 
changes](specs/sharding/p2p-interface.md) * Custody Game - Ready, dependent on sharding - * [Custody Game](specs/custody_game/custody-game.md) + * [Beacon Chain changes](specs/custody_game/beacon-chain.md) * [Validator custody work](specs/custody_game/validator.md) * Data Availability Sampling - In active R&D * Technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD). diff --git a/specs/custody_game/custody-game.md b/specs/custody_game/beacon-chain.md similarity index 99% rename from specs/custody_game/custody-game.md rename to specs/custody_game/beacon-chain.md index de22548b2..ca123f5aa 100644 --- a/specs/custody_game/custody-game.md +++ b/specs/custody_game/beacon-chain.md @@ -1,4 +1,4 @@ -# Ethereum 2.0 Phase 1 -- Custody Game +# Ethereum 2.0 Custody Game -- Beacon Chain **Notice**: This document is a work-in-progress for researchers and implementers. diff --git a/specs/custody_game/validator.md b/specs/custody_game/validator.md index b4045fd4a..a495924c9 100644 --- a/specs/custody_game/validator.md +++ b/specs/custody_game/validator.md @@ -44,15 +44,15 @@ Beacon chain validator assignments to beacon committees and beacon block proposa ##### Custody slashings -Up to `MAX_CUSTODY_SLASHINGS`, [`CustodySlashing`](./custody-game.md#custodyslashing) objects can be included in the `block`. The custody slashings must satisfy the verification conditions found in [custody slashings processing](./custody-game.md#custody-slashings). The validator receives a small "whistleblower" reward for each custody slashing included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE). +Up to `MAX_CUSTODY_SLASHINGS`, [`CustodySlashing`](./beacon-chain.md#custodyslashing) objects can be included in the `block`. The custody slashings must satisfy the verification conditions found in [custody slashings processing](beacon-chain.md#custody-slashings). 
The validator receives a small "whistleblower" reward for each custody slashing included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE). ##### Custody key reveals -Up to `MAX_CUSTODY_KEY_REVEALS`, [`CustodyKeyReveal`](./custody-game.md#custodykeyreveal) objects can be included in the `block`. The custody key reveals must satisfy the verification conditions found in [custody key reveal processing](./custody-game.md#custody-key-reveals). The validator receives a small reward for each custody key reveal included. +Up to `MAX_CUSTODY_KEY_REVEALS`, [`CustodyKeyReveal`](./beacon-chain.md#custodykeyreveal) objects can be included in the `block`. The custody key reveals must satisfy the verification conditions found in [custody key reveal processing](beacon-chain.md#custody-key-reveals). The validator receives a small reward for each custody key reveal included. ##### Early derived secret reveals -Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./custody-game.md#earlyderivedsecretreveal) objects can be included in the `block`. The early derived secret reveals must satisfy the verification conditions found in [early derived secret reveal processing](./custody-game.md#custody-key-reveals). The validator receives a small "whistleblower" reward for each early derived secrete reveal included. +Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./beacon-chain.md#earlyderivedsecretreveal) objects can be included in the `block`. The early derived secret reveals must satisfy the verification conditions found in [early derived secret reveal processing](beacon-chain.md#custody-key-reveals). The validator receives a small "whistleblower" reward for each early derived secrete reveal included. #### Construct attestation From 07b7774241f32a94fbf333ac785ea855974d7ed1 Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 30 Mar 2021 01:46:02 +0200 Subject: [PATCH 125/127] misc. 
phase1 modules refactor updates --- .gitignore | 1 - Makefile | 1 - configs/README.md | 2 +- specs/phase0/beacon-chain.md | 2 +- specs/phase0/p2p-interface.md | 2 +- specs/phase0/validator.md | 2 +- 6 files changed, 4 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 56a3d605c..f67b17dce 100644 --- a/.gitignore +++ b/.gitignore @@ -16,7 +16,6 @@ eth2.0-spec-tests/ # Dynamically built from Markdown spec tests/core/pyspec/eth2spec/phase0/ -tests/core/pyspec/eth2spec/phase1/ tests/core/pyspec/eth2spec/altair/ # coverage reports diff --git a/Makefile b/Makefile index b22e67c8f..4ecb3e2ed 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,6 @@ partial_clean: rm -rf $(PY_SPEC_DIR)/.pytest_cache rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache rm -rf $(PY_SPEC_DIR)/phase0 - rm -rf $(PY_SPEC_DIR)/phase1 rm -rf $(PY_SPEC_DIR)/altair rm -rf $(PY_SPEC_DIR)/$(COV_HTML_OUT) rm -rf $(PY_SPEC_DIR)/.coverage diff --git a/configs/README.md b/configs/README.md index 353cd35db..eb7fac945 100644 --- a/configs/README.md +++ b/configs/README.md @@ -3,7 +3,7 @@ This directory contains a set of constants presets used for testing, testnets, and mainnet. A preset file contains all the constants known for its target. -Later-fork constants can be ignored, e.g. ignore Phase 1 constants as a client that only supports Phase 0 currently. +Later-fork constants can be ignored, e.g. ignore Sharding constants as a client that only supports Phase 0 currently. ## Forking diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index e2a17dcfc..cbd085bd3 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -139,7 +139,7 @@ This document represents the specification for Phase 0 of Ethereum 2.0 -- The Beacon Chain. At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of validators. 
In the initial deployment phases of Ethereum 2.0, the only mechanism to become a validator is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a validator happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior. -The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block (Phase 1) and proof-of-stake votes for a beacon block (Phase 0). +The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block (in a later Eth2 upgrade) and proof-of-stake votes for a beacon block (Phase 0). ## Notation diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 28b166651..9336fac82 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -433,7 +433,7 @@ The following validations MUST pass before forwarding the `attestation` on the s Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The correct subnet for an attestation can be calculated with `compute_subnet_for_attestation`. -`beacon_attestation_{subnet_id}` topics, are rotated through throughout the epoch in a similar fashion to rotating through shards in committees in Phase 1. +`beacon_attestation_{subnet_id}` topics, are rotated through throughout the epoch in a similar fashion to rotating through shards in committees (future Eth2 upgrade). The subnets are rotated through with `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)` subnets per slot. 
Unaggregated attestations are sent as `Attestation`s to the subnet topic, diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 1767d3d44..e09d7ce88 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -513,7 +513,7 @@ The `subnet_id` for the `attestation` is calculated with: def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64: """ Compute the correct subnet for an attestation for Phase 0. - Note, this mimics expected Phase 1 behavior where attestations will be mapped to their shard subnet. + Note, this mimics expected future behavior where attestations will be mapped to their shard subnet. """ slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH) committees_since_epoch_start = committees_per_slot * slots_since_epoch_start From 734444261801a133d40adcc425baf59e739dbd46 Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 30 Mar 2021 01:53:36 +0200 Subject: [PATCH 126/127] update merge and sharding configs to reflect fork slot placeholder, as in #2287 --- configs/mainnet/merge.yaml | 4 ++-- configs/mainnet/sharding.yaml | 4 ++-- configs/minimal/merge.yaml | 4 ++-- configs/minimal/sharding.yaml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/configs/mainnet/merge.yaml b/configs/mainnet/merge.yaml index b9924d71d..b4667f5b5 100644 --- a/configs/mainnet/merge.yaml +++ b/configs/mainnet/merge.yaml @@ -3,5 +3,5 @@ # Fork # --------------------------------------------------------------- MERGE_FORK_VERSION: 0x02000000 -# TBD -MERGE_FORK_SLOT: 0 +# TBD, temporarily max uint64 value: 2**64 - 1 +MERGE_FORK_SLOT: 18446744073709551615 diff --git a/configs/mainnet/sharding.yaml b/configs/mainnet/sharding.yaml index 3051ab024..ab2c3f6f4 100644 --- a/configs/mainnet/sharding.yaml +++ b/configs/mainnet/sharding.yaml @@ -3,8 +3,8 @@ # Fork # --------------------------------------------------------------- SHARDING_FORK_VERSION: 0x03000000 -# TBD 
-SHARDING_FORK_SLOT: 0 +# TBD, temporarily max uint64 value: 2**64 - 1 +SHARDING_FORK_SLOT: 18446744073709551615 # Beacon-chain diff --git a/configs/minimal/merge.yaml b/configs/minimal/merge.yaml index f36caa832..394595d02 100644 --- a/configs/minimal/merge.yaml +++ b/configs/minimal/merge.yaml @@ -3,5 +3,5 @@ # Fork # --------------------------------------------------------------- MERGE_FORK_VERSION: 0x02000001 -# TBD -MERGE_FORK_SLOT: 0 +# TBD, temporarily max uint64 value: 2**64 - 1 +MERGE_FORK_SLOT: 18446744073709551615 diff --git a/configs/minimal/sharding.yaml b/configs/minimal/sharding.yaml index b9497cb53..f3d70111c 100644 --- a/configs/minimal/sharding.yaml +++ b/configs/minimal/sharding.yaml @@ -3,8 +3,8 @@ # Fork # --------------------------------------------------------------- SHARDING_FORK_VERSION: 0x03000001 -# TBD -SHARDING_FORK_SLOT: 0 +# TBD, temporarily max uint64 value: 2**64 - 1 +MERGE_FORK_SLOT: 18446744073709551615 # Beacon-chain From 3391f991ac3aed12a473b6b6552157aa82a5f104 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 30 Mar 2021 13:48:40 +0800 Subject: [PATCH 127/127] Minor wording fixes --- README.md | 3 +-- configs/README.md | 5 ++--- configs/mainnet/altair.yaml | 1 + configs/mainnet/custody_game.yaml | 2 ++ configs/mainnet/merge.yaml | 2 ++ configs/mainnet/sharding.yaml | 2 ++ configs/minimal/altair.yaml | 2 +- configs/minimal/custody_game.yaml | 2 ++ configs/minimal/merge.yaml | 2 ++ configs/minimal/sharding.yaml | 2 ++ 10 files changed, 17 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index f507019fd..36c8ad510 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,7 @@ The current features are: * [Beacon chain changes](specs/altair/beacon-chain.md) * [Altair fork](specs/altair/fork.md) * [Light client sync protocol](specs/altair/sync-protocol.md) +* [Honest Validator guide changes](specs/altair/validator.md) ### Merge @@ -83,14 +84,12 @@ The following are the broad design goals for Ethereum 2.0: * to 
utilize crypto and design techniques that allow for a large participation of validators in total and per unit time * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) - ## Useful external resources * [Design Rationale](https://notes.ethereum.org/s/rkhCgQteN#) * [Phase 0 Onboarding Document](https://notes.ethereum.org/s/Bkn3zpwxB) * [Combining GHOST and Casper paper](https://arxiv.org/abs/2003.03052) - ## For spec contributors Documentation on the different components used during spec writing can be found here: diff --git a/configs/README.md b/configs/README.md index eb7fac945..15529e590 100644 --- a/configs/README.md +++ b/configs/README.md @@ -15,10 +15,9 @@ Over time, the need to sync an older state may be deprecated. In this case, the prefix on the new constant may be removed, and the old constant will keep a special name before completely being removed. A previous iteration of forking made use of "timelines", but this collides with the definitions used in the spec (constants for special forking slots, etc.), and was not integrated sufficiently in any of the spec tools or implementations. -Instead, the config essentially doubles as fork definition now, e.g. changing the value for `PHASE_1_FORK_SLOT` changes the fork. +Instead, the config essentially doubles as fork definition now, e.g. changing the value for `ALTAIR_FORK_SLOT` changes the fork. Another reason to prefer forking through constants is the ability to program a forking moment based on context, instead of being limited to a static slot number. - ## Format @@ -32,4 +31,4 @@ Each preset is a key-value mapping. Presets may contain comments to describe the values. -See [`mainnet_phase0.yaml`](./mainnet_phase0.yaml) for a complete example. +See [`mainnet/phase0.yaml`](./mainnet/phase0.yaml) for a complete example. 
diff --git a/configs/mainnet/altair.yaml b/configs/mainnet/altair.yaml index 1e82d31ff..6fbd4d4c8 100644 --- a/configs/mainnet/altair.yaml +++ b/configs/mainnet/altair.yaml @@ -37,6 +37,7 @@ DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000 # Fork # --------------------------------------------------------------- +# 0x01000000 ALTAIR_FORK_VERSION: 0x01000000 # TBD ALTAIR_FORK_SLOT: 0 diff --git a/configs/mainnet/custody_game.yaml b/configs/mainnet/custody_game.yaml index ecb2dc377..8039d839b 100644 --- a/configs/mainnet/custody_game.yaml +++ b/configs/mainnet/custody_game.yaml @@ -1,5 +1,7 @@ # Mainnet preset - Custody Game +CONFIG_NAME: "mainnet" + # Time parameters # --------------------------------------------------------------- # 2**1 (= 2) epochs, 12.8 minutes diff --git a/configs/mainnet/merge.yaml b/configs/mainnet/merge.yaml index b4667f5b5..582e7d642 100644 --- a/configs/mainnet/merge.yaml +++ b/configs/mainnet/merge.yaml @@ -1,5 +1,7 @@ # Mainnet preset - The Merge +CONFIG_NAME: "mainnet" + # Fork # --------------------------------------------------------------- MERGE_FORK_VERSION: 0x02000000 diff --git a/configs/mainnet/sharding.yaml b/configs/mainnet/sharding.yaml index ab2c3f6f4..d44c7f550 100644 --- a/configs/mainnet/sharding.yaml +++ b/configs/mainnet/sharding.yaml @@ -1,5 +1,7 @@ # Mainnet preset - Sharding +CONFIG_NAME: "mainnet" + # Fork # --------------------------------------------------------------- SHARDING_FORK_VERSION: 0x03000000 diff --git a/configs/minimal/altair.yaml b/configs/minimal/altair.yaml index 26333fc72..9aab21a2e 100644 --- a/configs/minimal/altair.yaml +++ b/configs/minimal/altair.yaml @@ -37,7 +37,7 @@ DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000 # Fork # --------------------------------------------------------------- -# Highest byte set to 0x01 to avoid collisions with mainnet versioning +# [customized] Highest byte set to 0x01 to avoid collisions with mainnet versioning ALTAIR_FORK_VERSION: 0x01000001 # [customized] ALTAIR_FORK_SLOT: 0 
diff --git a/configs/minimal/custody_game.yaml b/configs/minimal/custody_game.yaml index 8b8992fb6..1d9393e80 100644 --- a/configs/minimal/custody_game.yaml +++ b/configs/minimal/custody_game.yaml @@ -1,5 +1,7 @@ # Minimal preset - Custody Game +CONFIG_NAME: "minimal" + # Time parameters # --------------------------------------------------------------- # 2**1 (= 2) epochs, 12.8 minutes diff --git a/configs/minimal/merge.yaml b/configs/minimal/merge.yaml index 394595d02..e2857917f 100644 --- a/configs/minimal/merge.yaml +++ b/configs/minimal/merge.yaml @@ -1,5 +1,7 @@ # Minimal preset - The Merge +CONFIG_NAME: "minimal" + # Fork # --------------------------------------------------------------- MERGE_FORK_VERSION: 0x02000001 diff --git a/configs/minimal/sharding.yaml b/configs/minimal/sharding.yaml index f3d70111c..07b40181b 100644 --- a/configs/minimal/sharding.yaml +++ b/configs/minimal/sharding.yaml @@ -1,5 +1,7 @@ # Minimal preset - Sharding +CONFIG_NAME: "minimal" + # Fork # --------------------------------------------------------------- SHARDING_FORK_VERSION: 0x03000001