- name: add_flag
  sources:
    - file: beacon-chain/core/altair/attestation.go
      search: func AddValidatorFlag(
  spec: |
    def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
        """
        Return a new ``ParticipationFlags`` adding ``flag_index`` to ``flags``.
        """
        flag = ParticipationFlags(2**flag_index)
        return flags | flag
- name: add_validator_to_registry#phase0
  sources:
    - file: beacon-chain/core/altair/deposit.go
      search: func AddValidatorToRegistry(
  spec: |
    def add_validator_to_registry(
        state: BeaconState,
        pubkey: BLSPubkey,
        withdrawal_credentials: Bytes32,
        amount: uint64
    ) -> None:
        state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
        state.balances.append(amount)
- name: add_validator_to_registry#altair
  sources:
    - file: beacon-chain/core/altair/deposit.go
      search: func AddValidatorToRegistry(
  spec: |
    def add_validator_to_registry(
        state: BeaconState,
        pubkey: BLSPubkey,
        withdrawal_credentials: Bytes32,
        amount: uint64
    ) -> None:
        index = get_index_for_new_validator(state)
        validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
        set_or_append_list(state.validators, index, validator)
        set_or_append_list(state.balances, index, amount)
        # [New in Altair]
        set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
        set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
        set_or_append_list(state.inactivity_scores, index, uint64(0))
- name: add_validator_to_registry#electra
  sources:
    - file: beacon-chain/core/electra/deposits.go
      search: func AddValidatorToRegistry(
  spec: |
    def add_validator_to_registry(
        state: BeaconState,
        pubkey: BLSPubkey,
        withdrawal_credentials: Bytes32,
        amount: uint64
    ) -> None:
        index = get_index_for_new_validator(state)
        # [Modified in Electra:EIP7251]
        validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
        set_or_append_list(state.validators, index, validator)
        set_or_append_list(state.balances, index, amount)
        set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
        set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
        set_or_append_list(state.inactivity_scores, index, uint64(0))
- name: apply_deposit#phase0
  sources:
    - file: beacon-chain/core/altair/deposit.go
      search: func ApplyDeposit(
  spec: |
    def apply_deposit(
        state: BeaconState,
        pubkey: BLSPubkey,
        withdrawal_credentials: Bytes32,
        amount: uint64,
        signature: BLSSignature,
    ) -> None:
        validator_pubkeys = [v.pubkey for v in state.validators]
        if pubkey not in validator_pubkeys:
            # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
            deposit_message = DepositMessage(
                pubkey=pubkey,
                withdrawal_credentials=withdrawal_credentials,
                amount=amount,
            )
            # Fork-agnostic domain since deposits are valid across forks
            domain = compute_domain(DOMAIN_DEPOSIT)
            signing_root = compute_signing_root(deposit_message, domain)
            if bls.Verify(pubkey, signing_root, signature):
                add_validator_to_registry(state, pubkey, withdrawal_credentials, amount)
        else:
            # Increase balance by deposit amount
            index = ValidatorIndex(validator_pubkeys.index(pubkey))
            increase_balance(state, index, amount)
- name: apply_deposit#electra
  sources:
    - file: beacon-chain/core/electra/deposits.go
      search: func ApplyDeposit(
  spec: |
    def apply_deposit(
        state: BeaconState,
        pubkey: BLSPubkey,
        withdrawal_credentials: Bytes32,
        amount: uint64,
        signature: BLSSignature,
    ) -> None:
        validator_pubkeys = [v.pubkey for v in state.validators]
        if pubkey not in validator_pubkeys:
            # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
            if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
                # [Modified in Electra:EIP7251]
                add_validator_to_registry(state, pubkey, withdrawal_credentials, Gwei(0))
            else:
                return
        # [Modified in Electra:EIP7251]
        # Increase balance by deposit amount
        state.pending_deposits.append(
            PendingDeposit(
                pubkey=pubkey,
                withdrawal_credentials=withdrawal_credentials,
                amount=amount,
                signature=signature,
                slot=GENESIS_SLOT,  # Use GENESIS_SLOT to distinguish from a pending deposit request
            )
        )
- name: apply_light_client_update
  sources:
    - file: beacon-chain/light-client/lightclient.go
      search: func NewLightClientUpdateFromBeaconState(
  spec: |
    def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
        store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot)
        update_finalized_period = compute_sync_committee_period_at_slot(
            update.finalized_header.beacon.slot
        )
        if not is_next_sync_committee_known(store):
            assert update_finalized_period == store_period
            store.next_sync_committee = update.next_sync_committee
        elif update_finalized_period == store_period + 1:
            store.current_sync_committee = store.next_sync_committee
            store.next_sync_committee = update.next_sync_committee
            store.previous_max_active_participants = store.current_max_active_participants
            store.current_max_active_participants = 0
        if update.finalized_header.beacon.slot > store.finalized_header.beacon.slot:
            store.finalized_header = update.finalized_header
            if store.finalized_header.beacon.slot > store.optimistic_header.beacon.slot:
                store.optimistic_header = store.finalized_header
- name: apply_pending_deposit
  sources:
    - file: beacon-chain/core/electra/deposits.go
      search: func ApplyPendingDeposit(
  spec: |
    def apply_pending_deposit(state: BeaconState, deposit: PendingDeposit) -> None:
        """
        Applies ``deposit`` to the ``state``.
""" validator_pubkeys = [v.pubkey for v in state.validators] if deposit.pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract if is_valid_deposit_signature( deposit.pubkey, deposit.withdrawal_credentials, deposit.amount, deposit.signature ): add_validator_to_registry( state, deposit.pubkey, deposit.withdrawal_credentials, deposit.amount ) else: validator_index = ValidatorIndex(validator_pubkeys.index(deposit.pubkey)) increase_balance(state, validator_index, deposit.amount) - name: block_to_light_client_header#altair sources: - file: beacon-chain/light-client/lightclient.go search: func BlockToLightClientHeader( spec: | def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: return LightClientHeader( beacon=BeaconBlockHeader( slot=block.message.slot, proposer_index=block.message.proposer_index, parent_root=block.message.parent_root, state_root=block.message.state_root, body_root=hash_tree_root(block.message.body), ), ) - name: block_to_light_client_header#capella sources: - file: beacon-chain/light-client/lightclient.go search: func BlockToLightClientHeader( spec: | def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: epoch = compute_epoch_at_slot(block.message.slot) if epoch >= CAPELLA_FORK_EPOCH: payload = block.message.body.execution_payload execution_header = ExecutionPayloadHeader( parent_hash=payload.parent_hash, fee_recipient=payload.fee_recipient, state_root=payload.state_root, receipts_root=payload.receipts_root, logs_bloom=payload.logs_bloom, prev_randao=payload.prev_randao, block_number=payload.block_number, gas_limit=payload.gas_limit, gas_used=payload.gas_used, timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), ) execution_branch = ExecutionBranch( compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX) ) else: # Note that during fork transitions, `finalized_header` may still point to earlier forks. # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`), # it was not included in the corresponding light client data. To ensure compatibility # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data. 
execution_header = ExecutionPayloadHeader() execution_branch = ExecutionBranch() return LightClientHeader( beacon=BeaconBlockHeader( slot=block.message.slot, proposer_index=block.message.proposer_index, parent_root=block.message.parent_root, state_root=block.message.state_root, body_root=hash_tree_root(block.message.body), ), execution=execution_header, execution_branch=execution_branch, ) - name: block_to_light_client_header#deneb sources: - file: beacon-chain/light-client/lightclient.go search: func BlockToLightClientHeader( spec: | def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: epoch = compute_epoch_at_slot(block.message.slot) if epoch >= CAPELLA_FORK_EPOCH: payload = block.message.body.execution_payload execution_header = ExecutionPayloadHeader( parent_hash=payload.parent_hash, fee_recipient=payload.fee_recipient, state_root=payload.state_root, receipts_root=payload.receipts_root, logs_bloom=payload.logs_bloom, prev_randao=payload.prev_randao, block_number=payload.block_number, gas_limit=payload.gas_limit, gas_used=payload.gas_used, timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), ) # [New in Deneb:EIP4844] if epoch >= DENEB_FORK_EPOCH: execution_header.blob_gas_used = payload.blob_gas_used execution_header.excess_blob_gas = payload.excess_blob_gas execution_branch = ExecutionBranch( compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX) ) else: # Note that during fork transitions, `finalized_header` may still point to earlier forks. # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`), # it was not included in the corresponding light client data. To ensure compatibility # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data. execution_header = ExecutionPayloadHeader() execution_branch = ExecutionBranch() return LightClientHeader( beacon=BeaconBlockHeader( slot=block.message.slot, proposer_index=block.message.proposer_index, parent_root=block.message.parent_root, state_root=block.message.state_root, body_root=hash_tree_root(block.message.body), ), execution=execution_header, execution_branch=execution_branch, ) - name: bytes_to_uint64 sources: - file: encoding/bytesutil/integers.go search: func FromBytes8( spec: | def bytes_to_uint64(data: bytes) -> uint64: """ Return the integer deserialization of ``data`` interpreted as ``ENDIANNESS``-endian. 
""" return uint64(int.from_bytes(data, ENDIANNESS)) - name: calculate_committee_fraction sources: [] spec: | def calculate_committee_fraction(state: BeaconState, committee_percent: uint64) -> Gwei: committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH return Gwei((committee_weight * committee_percent) // 100) - name: check_if_validator_active sources: - file: beacon-chain/core/helpers/validators.go search: func IsActiveValidator( spec: | def check_if_validator_active(state: BeaconState, validator_index: ValidatorIndex) -> bool: validator = state.validators[validator_index] return is_active_validator(validator, get_current_epoch(state)) - name: compute_activation_exit_epoch sources: - file: beacon-chain/core/helpers/validators.go search: func ActivationExitEpoch( spec: | def compute_activation_exit_epoch(epoch: Epoch) -> Epoch: """ Return the epoch during which validator activations and exits initiated in ``epoch`` take effect. """ return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD) - name: compute_columns_for_custody_group sources: - file: beacon-chain/core/peerdas/das_core.go search: func ComputeColumnsForCustodyGroup( spec: | def compute_columns_for_custody_group(custody_group: CustodyIndex) -> Sequence[ColumnIndex]: assert custody_group < NUMBER_OF_CUSTODY_GROUPS columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS return [ ColumnIndex(NUMBER_OF_CUSTODY_GROUPS * i + custody_group) for i in range(columns_per_group) ] - name: compute_committee sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func ComputeCommittee( spec: | def compute_committee( indices: Sequence[ValidatorIndex], seed: Bytes32, index: uint64, count: uint64 ) -> Sequence[ValidatorIndex]: """ Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``. """ start = (len(indices) * index) // count end = (len(indices) * uint64(index + 1)) // count return [ indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end) ] - name: compute_consolidation_epoch_and_update_churn sources: - file: beacon-chain/core/electra/churn.go search: func ComputeConsolidationEpochAndUpdateChurn( spec: | def compute_consolidation_epoch_and_update_churn( state: BeaconState, consolidation_balance: Gwei ) -> Epoch: earliest_consolidation_epoch = max( state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state)) ) per_epoch_consolidation_churn = get_consolidation_churn_limit(state) # New epoch for consolidations. if state.earliest_consolidation_epoch < earliest_consolidation_epoch: consolidation_balance_to_consume = per_epoch_consolidation_churn else: consolidation_balance_to_consume = state.consolidation_balance_to_consume # Consolidation doesn't fit in the current earliest epoch. if consolidation_balance > consolidation_balance_to_consume: balance_to_process = consolidation_balance - consolidation_balance_to_consume additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1 earliest_consolidation_epoch += additional_epochs consolidation_balance_to_consume += additional_epochs * per_epoch_consolidation_churn # Consume the balance and update state variables. 
state.consolidation_balance_to_consume = ( consolidation_balance_to_consume - consolidation_balance ) state.earliest_consolidation_epoch = earliest_consolidation_epoch return state.earliest_consolidation_epoch - name: compute_domain sources: - file: beacon-chain/core/signing/signing_root.go search: func ComputeDomain( spec: | def compute_domain( domain_type: DomainType, fork_version: Version = None, genesis_validators_root: Root = None ) -> Domain: """ Return the domain for the ``domain_type`` and ``fork_version``. """ if fork_version is None: fork_version = GENESIS_FORK_VERSION if genesis_validators_root is None: genesis_validators_root = Root() # all bytes zero by default fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root) return Domain(domain_type + fork_data_root[:28]) - name: compute_epoch_at_slot sources: - file: time/slots/slottime.go search: func ToEpoch( spec: | def compute_epoch_at_slot(slot: Slot) -> Epoch: """ Return the epoch number at ``slot``. """ return Epoch(slot // SLOTS_PER_EPOCH) - name: compute_exit_epoch_and_update_churn sources: - file: beacon-chain/state/state-native/setters_churn.go search: func (b *BeaconState) ExitEpochAndUpdateChurn( spec: | def compute_exit_epoch_and_update_churn(state: BeaconState, exit_balance: Gwei) -> Epoch: earliest_exit_epoch = max( state.earliest_exit_epoch, compute_activation_exit_epoch(get_current_epoch(state)) ) per_epoch_churn = get_activation_exit_churn_limit(state) # New epoch for exits. if state.earliest_exit_epoch < earliest_exit_epoch: exit_balance_to_consume = per_epoch_churn else: exit_balance_to_consume = state.exit_balance_to_consume # Exit doesn't fit in the current earliest epoch. if exit_balance > exit_balance_to_consume: balance_to_process = exit_balance - exit_balance_to_consume additional_epochs = (balance_to_process - 1) // per_epoch_churn + 1 earliest_exit_epoch += additional_epochs exit_balance_to_consume += additional_epochs * per_epoch_churn # Consume the balance and update state variables. state.exit_balance_to_consume = exit_balance_to_consume - exit_balance state.earliest_exit_epoch = earliest_exit_epoch return state.earliest_exit_epoch - name: compute_fork_data_root sources: - file: beacon-chain/core/signing/signing_root.go search: func computeForkDataRoot( spec: | def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root: """ Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``. This is used primarily in signature domains to avoid collisions across forks/chains. """ return hash_tree_root( ForkData( current_version=current_version, genesis_validators_root=genesis_validators_root, ) ) - name: compute_fork_digest#phase0 sources: - file: beacon-chain/core/signing/signing_root.go search: func ComputeForkDigest( spec: | def compute_fork_digest( genesis_validators_root: Root, epoch: Epoch, ) -> ForkDigest: """ Return the 4-byte fork digest for the ``genesis_validators_root`` at a given ``epoch``. This is a digest primarily used for domain separation on the p2p layer. 4-bytes suffices for practical separation of forks/chains. 
""" fork_version = compute_fork_version(epoch) base_digest = compute_fork_data_root(fork_version, genesis_validators_root) return ForkDigest(base_digest[:4]) - name: compute_fork_digest#fulu sources: - file: config/params/fork.go search: func ForkDigest( - file: config/params/config.go search: func entryWithForkDigest( spec: | def compute_fork_digest( genesis_validators_root: Root, epoch: Epoch, ) -> ForkDigest: """ Return the 4-byte fork digest for the ``genesis_validators_root`` at a given ``epoch``. This is a digest primarily used for domain separation on the p2p layer. 4-bytes suffices for practical separation of forks/chains. """ fork_version = compute_fork_version(epoch) base_digest = compute_fork_data_root(fork_version, genesis_validators_root) # [Modified in Fulu:EIP7892] # Bitmask digest with hash of blob parameters blob_parameters = get_blob_parameters(epoch) return ForkDigest( bytes( xor( base_digest, hash( uint_to_bytes(uint64(blob_parameters.epoch)) + uint_to_bytes(uint64(blob_parameters.max_blobs_per_block)) ), ) )[:4] ) - name: compute_fork_version#altair sources: - file: time/slots/slottime.go search: func ToForkVersion( - file: time/slots/slottime.go search: return version.Altair spec: | def compute_fork_version(epoch: Epoch) -> Version: """ Return the fork version at the given ``epoch``. """ if epoch >= ALTAIR_FORK_EPOCH: return ALTAIR_FORK_VERSION return GENESIS_FORK_VERSION - name: compute_fork_version#bellatrix sources: - file: time/slots/slottime.go search: func ToForkVersion( - file: time/slots/slottime.go search: return version.Bellatrix spec: | def compute_fork_version(epoch: Epoch) -> Version: """ Return the fork version at the given ``epoch``. """ if epoch >= BELLATRIX_FORK_EPOCH: return BELLATRIX_FORK_VERSION if epoch >= ALTAIR_FORK_EPOCH: return ALTAIR_FORK_VERSION return GENESIS_FORK_VERSION - name: compute_fork_version#capella sources: - file: time/slots/slottime.go search: func ToForkVersion( - file: time/slots/slottime.go search: return version.Capella spec: | def compute_fork_version(epoch: Epoch) -> Version: """ Return the fork version at the given ``epoch``. """ if epoch >= CAPELLA_FORK_EPOCH: return CAPELLA_FORK_VERSION if epoch >= BELLATRIX_FORK_EPOCH: return BELLATRIX_FORK_VERSION if epoch >= ALTAIR_FORK_EPOCH: return ALTAIR_FORK_VERSION return GENESIS_FORK_VERSION - name: compute_fork_version#deneb sources: - file: time/slots/slottime.go search: func ToForkVersion( - file: time/slots/slottime.go search: return version.Deneb spec: | def compute_fork_version(epoch: Epoch) -> Version: """ Return the fork version at the given ``epoch``. """ if epoch >= DENEB_FORK_EPOCH: return DENEB_FORK_VERSION if epoch >= CAPELLA_FORK_EPOCH: return CAPELLA_FORK_VERSION if epoch >= BELLATRIX_FORK_EPOCH: return BELLATRIX_FORK_VERSION if epoch >= ALTAIR_FORK_EPOCH: return ALTAIR_FORK_VERSION return GENESIS_FORK_VERSION - name: compute_fork_version#electra sources: - file: time/slots/slottime.go search: func ToForkVersion( - file: time/slots/slottime.go search: return version.Electra spec: | def compute_fork_version(epoch: Epoch) -> Version: """ Return the fork version at the given ``epoch``. 
""" if epoch >= ELECTRA_FORK_EPOCH: return ELECTRA_FORK_VERSION if epoch >= DENEB_FORK_EPOCH: return DENEB_FORK_VERSION if epoch >= CAPELLA_FORK_EPOCH: return CAPELLA_FORK_VERSION if epoch >= BELLATRIX_FORK_EPOCH: return BELLATRIX_FORK_VERSION if epoch >= ALTAIR_FORK_EPOCH: return ALTAIR_FORK_VERSION return GENESIS_FORK_VERSION - name: compute_fork_version#fulu sources: - file: time/slots/slottime.go search: func ToForkVersion( - file: time/slots/slottime.go search: return version.Fulu spec: | def compute_fork_version(epoch: Epoch) -> Version: """ Return the fork version at the given ``epoch``. """ if epoch >= FULU_FORK_EPOCH: return FULU_FORK_VERSION if epoch >= ELECTRA_FORK_EPOCH: return ELECTRA_FORK_VERSION if epoch >= DENEB_FORK_EPOCH: return DENEB_FORK_VERSION if epoch >= CAPELLA_FORK_EPOCH: return CAPELLA_FORK_VERSION if epoch >= BELLATRIX_FORK_EPOCH: return BELLATRIX_FORK_VERSION if epoch >= ALTAIR_FORK_EPOCH: return ALTAIR_FORK_VERSION return GENESIS_FORK_VERSION - name: compute_matrix sources: [] spec: | def compute_matrix(blobs: Sequence[Blob]) -> Sequence[MatrixEntry]: """ Return the full, flattened sequence of matrix entries. This helper demonstrates the relationship between blobs and the matrix of cells/proofs. The data structure for storing cells/proofs is implementation-dependent. """ matrix = [] for blob_index, blob in enumerate(blobs): cells, proofs = compute_cells_and_kzg_proofs(blob) for cell_index, (cell, proof) in enumerate(zip(cells, proofs)): matrix.append( MatrixEntry( cell=cell, kzg_proof=proof, row_index=blob_index, column_index=cell_index, ) ) return matrix - name: compute_new_state_root sources: - file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go search: func (vs *Server) computeStateRoot( spec: | def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root: temp_state: BeaconState = state.copy() signed_block = SignedBeaconBlock(message=block) state_transition(temp_state, signed_block, validate_result=False) return hash_tree_root(temp_state) - name: compute_on_chain_aggregate sources: - file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_electra.go search: func computeOnChainAggregate( spec: | def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation: aggregates = sorted( network_aggregates, key=lambda a: get_committee_indices(a.committee_bits)[0] ) data = aggregates[0].data aggregation_bits = Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT]() for a in aggregates: for b in a.aggregation_bits: aggregation_bits.append(b) signature = bls.Aggregate([a.signature for a in aggregates]) committee_indices = [get_committee_indices(a.committee_bits)[0] for a in aggregates] committee_flags = [(index in committee_indices) for index in range(0, MAX_COMMITTEES_PER_SLOT)] committee_bits = Bitvector[MAX_COMMITTEES_PER_SLOT](committee_flags) return Attestation( aggregation_bits=aggregation_bits, data=data, committee_bits=committee_bits, signature=signature, ) - name: compute_proposer_index#phase0 sources: - file: beacon-chain/core/helpers/validators.go search: func ComputeProposerIndex( spec: | def compute_proposer_index( state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32 ) -> ValidatorIndex: """ Return from ``indices`` a random index sampled by effective balance. 
""" assert len(indices) > 0 MAX_RANDOM_BYTE = 2**8 - 1 i = uint64(0) total = uint64(len(indices)) while True: candidate_index = indices[compute_shuffled_index(i % total, total, seed)] random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] effective_balance = state.validators[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: return candidate_index i += 1 - name: compute_proposer_index#electra sources: - file: beacon-chain/core/helpers/validators.go search: func ComputeProposerIndex( - file: beacon-chain/core/helpers/validators.go search: if bState.Version() >= version.Electra { spec: | def compute_proposer_index( state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32 ) -> ValidatorIndex: """ Return from ``indices`` a random index sampled by effective balance. """ assert len(indices) > 0 # [Modified in Electra] MAX_RANDOM_VALUE = 2**16 - 1 i = uint64(0) total = uint64(len(indices)) while True: candidate_index = indices[compute_shuffled_index(i % total, total, seed)] # [Modified in Electra] random_bytes = hash(seed + uint_to_bytes(i // 16)) offset = i % 16 * 2 random_value = bytes_to_uint64(random_bytes[offset : offset + 2]) effective_balance = state.validators[candidate_index].effective_balance # [Modified in Electra:EIP7251] if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value: return candidate_index i += 1 - name: compute_proposer_indices sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func PrecomputeProposerIndices( spec: | def compute_proposer_indices( state: BeaconState, epoch: Epoch, seed: Bytes32, indices: Sequence[ValidatorIndex] ) -> Vector[ValidatorIndex, SLOTS_PER_EPOCH]: """ Return the proposer indices for the given ``epoch``. """ start_slot = compute_start_slot_at_epoch(epoch) seeds = [hash(seed + uint_to_bytes(Slot(start_slot + i))) for i in range(SLOTS_PER_EPOCH)] return [compute_proposer_index(state, indices, seed) for seed in seeds] - name: compute_pulled_up_tip sources: [] spec: | def compute_pulled_up_tip(store: Store, block_root: Root) -> None: state = store.block_states[block_root].copy() # Pull up the post-state of the block to the next epoch boundary process_justification_and_finalization(state) store.unrealized_justifications[block_root] = state.current_justified_checkpoint update_unrealized_checkpoints( store, state.current_justified_checkpoint, state.finalized_checkpoint ) # If the block is from a prior epoch, apply the realized values block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot) current_epoch = get_current_store_epoch(store) if block_epoch < current_epoch: update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) - name: compute_shuffled_index sources: - file: beacon-chain/core/helpers/shuffle.go search: func ComputeShuffledIndex( spec: | def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64: """ Return the shuffled index corresponding to ``seed`` (and ``index_count``). 
""" assert index < index_count # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf) # See the 'generalized domain' algorithm on page 3 for current_round in range(SHUFFLE_ROUND_COUNT): pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count flip = (pivot + index_count - index) % index_count position = max(index, flip) source = hash( seed + uint_to_bytes(uint8(current_round)) + uint_to_bytes(uint32(position // 256)) ) byte = uint8(source[(position % 256) // 8]) bit = (byte >> (position % 8)) % 2 index = flip if bit else index return index - name: compute_signed_block_header sources: - file: consensus-types/interfaces/utils.go search: func SignedBeaconBlockHeaderFromBlock( spec: | def compute_signed_block_header(signed_block: SignedBeaconBlock) -> SignedBeaconBlockHeader: block = signed_block.message block_header = BeaconBlockHeader( slot=block.slot, proposer_index=block.proposer_index, parent_root=block.parent_root, state_root=block.state_root, body_root=hash_tree_root(block.body), ) return SignedBeaconBlockHeader(message=block_header, signature=signed_block.signature) - name: compute_signing_root sources: - file: beacon-chain/core/signing/signing_root.go search: func ComputeSigningRoot( spec: | def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: """ Return the signing root for the corresponding signing data. """ return hash_tree_root( SigningData( object_root=hash_tree_root(ssz_object), domain=domain, ) ) - name: compute_slots_since_epoch_start sources: - file: time/slots/slottime.go search: func SinceEpochStarts( spec: | def compute_slots_since_epoch_start(slot: Slot) -> int: return slot - compute_start_slot_at_epoch(compute_epoch_at_slot(slot)) - name: compute_start_slot_at_epoch sources: - file: time/slots/slottime.go search: func EpochStart( spec: | def compute_start_slot_at_epoch(epoch: Epoch) -> Slot: """ Return the start slot of ``epoch``. """ return Slot(epoch * SLOTS_PER_EPOCH) - name: compute_subnet_for_attestation sources: - file: beacon-chain/core/helpers/attestation.go search: func ComputeSubnetForAttestation( spec: | def compute_subnet_for_attestation( committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex ) -> SubnetID: """ Compute the correct subnet for an attestation for Phase 0. Note, this mimics expected future behavior where attestations will be mapped to their shard subnet. 
""" slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH) committees_since_epoch_start = committees_per_slot * slots_since_epoch_start return SubnetID((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT) - name: compute_subnet_for_blob_sidecar#deneb sources: - file: beacon-chain/sync/validate_blob.go search: func computeSubnetForBlobSidecar( spec: | def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT) - name: compute_subnet_for_blob_sidecar#electra sources: - file: beacon-chain/sync/validate_blob.go search: func computeSubnetForBlobSidecar( - file: beacon-chain/sync/validate_blob.go search: if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch { spec: | def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) - name: compute_subnet_for_data_column_sidecar sources: - file: beacon-chain/core/peerdas/p2p_interface.go search: func ComputeSubnetForDataColumnSidecar( spec: | def compute_subnet_for_data_column_sidecar(column_index: ColumnIndex) -> SubnetID: return SubnetID(column_index % DATA_COLUMN_SIDECAR_SUBNET_COUNT) - name: compute_subnets_for_sync_committee sources: - file: beacon-chain/sync/validate_sync_committee_message.go search: func (s *Service) rejectIncorrectSyncCommittee( spec: | def compute_subnets_for_sync_committee( state: BeaconState, validator_index: ValidatorIndex ) -> Set[SubnetID]: next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period( next_slot_epoch ): sync_committee = state.current_sync_committee else: sync_committee = state.next_sync_committee target_pubkey = state.validators[validator_index].pubkey sync_committee_indices = [ index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey ] return set( [ SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) for index in sync_committee_indices ] ) - name: compute_subscribed_subnet sources: - file: beacon-chain/p2p/subnets.go search: func computeSubscribedSubnet( spec: | def compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID: node_id_prefix = node_id >> (NODE_ID_BITS - ATTESTATION_SUBNET_PREFIX_BITS) node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION permutation_seed = hash( uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)) ) permutated_prefix = compute_shuffled_index( node_id_prefix, 1 << ATTESTATION_SUBNET_PREFIX_BITS, permutation_seed, ) return SubnetID((permutated_prefix + index) % ATTESTATION_SUBNET_COUNT) - name: compute_subscribed_subnets sources: - file: beacon-chain/p2p/subnets.go search: func computeSubscribedSubnets( spec: | def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]: return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)] - name: compute_sync_committee_period sources: - file: time/slots/slottime.go search: func SyncCommitteePeriod( spec: | def compute_sync_committee_period(epoch: Epoch) -> uint64: return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD - name: compute_sync_committee_period_at_slot sources: [] spec: | def compute_sync_committee_period_at_slot(slot: Slot) -> uint64: return compute_sync_committee_period(compute_epoch_at_slot(slot)) - name: compute_time_at_slot sources: - file: time/slots/slottime.go search: func StartTime( spec: | def 
compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: slots_since_genesis = slot - GENESIS_SLOT return uint64(state.genesis_time + slots_since_genesis * SECONDS_PER_SLOT) - name: compute_weak_subjectivity_period#phase0 sources: - file: beacon-chain/core/helpers/weak_subjectivity.go search: func ComputeWeakSubjectivityPeriod( spec: | def compute_weak_subjectivity_period(state: BeaconState) -> uint64: """ Returns the weak subjectivity period for the current ``state``. This computation takes into account the effect of: - validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and - validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch). A detailed calculation can be found at: https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf """ ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY N = len(get_active_validator_indices(state, get_current_epoch(state))) t = get_total_active_balance(state) // N // ETH_TO_GWEI T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI delta = get_validator_churn_limit(state) Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH D = SAFETY_DECAY if T * (200 + 3 * D) < t * (200 + 12 * D): epochs_for_validator_set_churn = ( N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T)) ) epochs_for_balance_top_ups = N * (200 + 3 * D) // (600 * Delta) ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups) else: ws_period += 3 * N * D * t // (200 * Delta * (T - t)) return ws_period - name: compute_weak_subjectivity_period#electra sources: [] spec: | def compute_weak_subjectivity_period(state: BeaconState) -> uint64: """ Returns the weak subjectivity period for the current ``state``. This computation takes into account the effect of: - validator set churn (bounded by ``get_balance_churn_limit()`` per epoch) A detailed calculation can be found at: https://notes.ethereum.org/@CarlBeek/electra_weak_subjectivity """ t = get_total_active_balance(state) delta = get_balance_churn_limit(state) epochs_for_validator_set_churn = SAFETY_DECAY * t // (2 * delta * 100) return MIN_VALIDATOR_WITHDRAWABILITY_DELAY + epochs_for_validator_set_churn - name: create_light_client_bootstrap sources: - file: beacon-chain/light-client/lightclient.go search: func NewLightClientBootstrapFromBeaconState( spec: | def create_light_client_bootstrap( state: BeaconState, block: SignedBeaconBlock ) -> LightClientBootstrap: assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH assert state.slot == state.latest_block_header.slot header = state.latest_block_header.copy() header.state_root = hash_tree_root(state) assert hash_tree_root(header) == hash_tree_root(block.message) return LightClientBootstrap( header=block_to_light_client_header(block), current_sync_committee=state.current_sync_committee, current_sync_committee_branch=CurrentSyncCommitteeBranch( compute_merkle_proof(state, current_sync_committee_gindex_at_slot(state.slot)) ), ) - name: create_light_client_finality_update sources: - file: beacon-chain/light-client/lightclient.go search: func NewLightClientFinalityUpdateFromBeaconState( spec: | def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate: return LightClientFinalityUpdate( attested_header=update.attested_header, finalized_header=update.finalized_header, finality_branch=update.finality_branch, sync_aggregate=update.sync_aggregate, signature_slot=update.signature_slot, ) - name: 
create_light_client_optimistic_update sources: - file: beacon-chain/light-client/lightclient.go search: func NewLightClientOptimisticUpdateFromBeaconState( spec: | def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate: return LightClientOptimisticUpdate( attested_header=update.attested_header, sync_aggregate=update.sync_aggregate, signature_slot=update.signature_slot, ) - name: create_light_client_update sources: - file: beacon-chain/light-client/lightclient.go search: func NewLightClientUpdateFromBeaconState( spec: | def create_light_client_update( state: BeaconState, block: SignedBeaconBlock, attested_state: BeaconState, attested_block: SignedBeaconBlock, finalized_block: Optional[SignedBeaconBlock], ) -> LightClientUpdate: assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH assert ( sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS ) assert state.slot == state.latest_block_header.slot header = state.latest_block_header.copy() header.state_root = hash_tree_root(state) assert hash_tree_root(header) == hash_tree_root(block.message) update_signature_period = compute_sync_committee_period_at_slot(block.message.slot) assert attested_state.slot == attested_state.latest_block_header.slot attested_header = attested_state.latest_block_header.copy() attested_header.state_root = hash_tree_root(attested_state) assert ( hash_tree_root(attested_header) == hash_tree_root(attested_block.message) == block.message.parent_root ) update_attested_period = compute_sync_committee_period_at_slot(attested_block.message.slot) update = LightClientUpdate() update.attested_header = block_to_light_client_header(attested_block) # `next_sync_committee` is only useful if the message is signed by the current sync committee if update_attested_period == update_signature_period: update.next_sync_committee = attested_state.next_sync_committee update.next_sync_committee_branch = NextSyncCommitteeBranch( compute_merkle_proof( attested_state, next_sync_committee_gindex_at_slot(attested_state.slot) ) ) # Indicate finality whenever possible if finalized_block is not None: if finalized_block.message.slot != GENESIS_SLOT: update.finalized_header = block_to_light_client_header(finalized_block) assert ( hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root ) else: assert attested_state.finalized_checkpoint.root == Bytes32() update.finality_branch = FinalityBranch( compute_merkle_proof(attested_state, finalized_root_gindex_at_slot(attested_state.slot)) ) update.sync_aggregate = block.message.body.sync_aggregate update.signature_slot = block.message.slot return update - name: current_sync_committee_gindex_at_slot#altair sources: - file: beacon-chain/state/state-native/proofs.go search: func (b *BeaconState) CurrentSyncCommitteeGeneralizedIndex( spec: | def current_sync_committee_gindex_at_slot(_slot: Slot) -> GeneralizedIndex: return CURRENT_SYNC_COMMITTEE_GINDEX - name: current_sync_committee_gindex_at_slot#electra sources: [] spec: | def current_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex: epoch = compute_epoch_at_slot(slot) # [Modified in Electra] if epoch >= ELECTRA_FORK_EPOCH: return CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA return CURRENT_SYNC_COMMITTEE_GINDEX - name: decrease_balance sources: - file: beacon-chain/core/helpers/rewards_penalties.go search: func DecreaseBalance( spec: | def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: """ 
        Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
        """
        state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta
- name: eth_aggregate_pubkeys
  sources:
    - file: crypto/bls/bls.go
      search: func AggregatePublicKeys(
  spec: |
    def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
        """
        Return the aggregate public key for the public keys in ``pubkeys``.
        Note: the ``+`` operation should be interpreted as elliptic curve point addition, which takes
        as input elliptic curve points that must be decoded from the input ``BLSPubkey``s.
        This implementation is for demonstrative purposes only and ignores encoding/decoding concerns.
        Refer to the BLS signature draft standard for more information.
        """
        assert len(pubkeys) > 0
        # Ensure that the given inputs are valid pubkeys
        assert all(bls.KeyValidate(pubkey) for pubkey in pubkeys)
        result = copy(pubkeys[0])
        for pubkey in pubkeys[1:]:
            result += pubkey
        return result
- name: eth_fast_aggregate_verify
  sources:
    - file: crypto/bls/blst/signature.go
      search: func (s *Signature) Eth2FastAggregateVerify(
  spec: |
    def eth_fast_aggregate_verify(
        pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature
    ) -> bool:
        """
        Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when
        ``pubkeys`` is empty.
        """
        if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
            return True
        return bls.FastAggregateVerify(pubkeys, message, signature)
- name: filter_block_tree
  sources: []
  spec: |
    def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
        block = store.blocks[block_root]
        children = [
            root for root in store.blocks.keys() if store.blocks[root].parent_root == block_root
        ]
        # If any children branches contain expected finalized/justified checkpoints,
        # add to filtered block-tree and signal viability to parent.
        if any(children):
            filter_block_tree_result = [filter_block_tree(store, child, blocks) for child in children]
            if any(filter_block_tree_result):
                blocks[block_root] = block
                return True
            return False
        current_epoch = get_current_store_epoch(store)
        voting_source = get_voting_source(store, block_root)
        # The voting source should be either at the same height as the store's justified checkpoint or
        # not more than two epochs ago
        correct_justified = (
            store.justified_checkpoint.epoch == GENESIS_EPOCH
            or voting_source.epoch == store.justified_checkpoint.epoch
            or voting_source.epoch + 2 >= current_epoch
        )
        finalized_checkpoint_block = get_checkpoint_block(
            store,
            block_root,
            store.finalized_checkpoint.epoch,
        )
        correct_finalized = (
            store.finalized_checkpoint.epoch == GENESIS_EPOCH
            or store.finalized_checkpoint.root == finalized_checkpoint_block
        )
        # If expected finalized/justified, add to viable block-tree and signal viability to parent.
        if correct_justified and correct_finalized:
            blocks[block_root] = block
            return True
        # Otherwise, branch not viable
        return False
- name: finalized_root_gindex_at_slot#altair
  sources:
    - file: beacon-chain/state/state-native/proofs.go
      search: func FinalizedRootGeneralizedIndex(
  spec: |
    def finalized_root_gindex_at_slot(_slot: Slot) -> GeneralizedIndex:
        return FINALIZED_ROOT_GINDEX
- name: finalized_root_gindex_at_slot#electra
  sources: []
  spec: |
    def finalized_root_gindex_at_slot(slot: Slot) -> GeneralizedIndex:
        epoch = compute_epoch_at_slot(slot)
        # [Modified in Electra]
        if epoch >= ELECTRA_FORK_EPOCH:
            return FINALIZED_ROOT_GINDEX_ELECTRA
        return FINALIZED_ROOT_GINDEX
- name: get_activation_exit_churn_limit
  sources:
    - file: beacon-chain/core/helpers/validator_churn.go
      search: func ActivationExitChurnLimit(
  spec: |
    def get_activation_exit_churn_limit(state: BeaconState) -> Gwei:
        """
        Return the churn limit for the current epoch dedicated to activations and exits.
        """
        return min(MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT, get_balance_churn_limit(state))
- name: get_active_validator_indices
  sources:
    - file: beacon-chain/core/helpers/validators.go
      search: func ActiveValidatorIndices(
  spec: |
    def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
        """
        Return the sequence of active validator indices at ``epoch``.
        """
        return [
            ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)
        ]
- name: get_aggregate_and_proof
  sources: []
  spec: |
    def get_aggregate_and_proof(
        state: BeaconState, aggregator_index: ValidatorIndex, aggregate: Attestation, privkey: int
    ) -> AggregateAndProof:
        return AggregateAndProof(
            aggregator_index=aggregator_index,
            aggregate=aggregate,
            selection_proof=get_slot_signature(state, aggregate.data.slot, privkey),
        )
- name: get_aggregate_and_proof_signature
  sources:
    - file: validator/client/aggregate.go
      search: func (v *validator) aggregateAndProofSig(
  spec: |
    def get_aggregate_and_proof_signature(
        state: BeaconState, aggregate_and_proof: AggregateAndProof, privkey: int
    ) -> BLSSignature:
        aggregate = aggregate_and_proof.aggregate
        domain = get_domain(
            state, DOMAIN_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot)
        )
        signing_root = compute_signing_root(aggregate_and_proof, domain)
        return bls.Sign(privkey, signing_root)
- name: get_aggregate_signature
  sources:
    - file: crypto/bls/bls.go
      search: func AggregateSignatures(
  spec: |
    def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
        signatures = [attestation.signature for attestation in attestations]
        return bls.Aggregate(signatures)
- name: get_ancestor
  sources:
    - file: beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go
      search: func (f *ForkChoice) AncestorRoot(
  spec: |
    def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
        block = store.blocks[root]
        if block.slot > slot:
            return get_ancestor(store, block.parent_root, slot)
        return root
- name: get_attestation_component_deltas
  sources: []
  spec: |
    def get_attestation_component_deltas(
        state: BeaconState, attestations: Sequence[PendingAttestation]
    ) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
        """
        Helper with shared logic for use by get source, target, and head deltas functions
        """
        rewards = [Gwei(0)] * len(state.validators)
        penalties = [Gwei(0)] * len(state.validators)
        total_balance = get_total_active_balance(state)
        unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations)
        attesting_balance = get_total_balance(state, unslashed_attesting_indices)
        for index in get_eligible_validator_indices(state):
            if index in unslashed_attesting_indices:
                increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from balance totals to avoid uint64 overflow
                if is_in_inactivity_leak(state):
                    # Since full base reward will be canceled out by inactivity penalty deltas,
                    # optimal participation receives full base reward compensation here.
                    rewards[index] += get_base_reward(state, index)
                else:
                    reward_numerator = get_base_reward(state, index) * (attesting_balance // increment)
                    rewards[index] += reward_numerator // (total_balance // increment)
            else:
                penalties[index] += get_base_reward(state, index)
        return rewards, penalties
- name: get_attestation_deltas
  sources:
    - file: beacon-chain/core/epoch/precompute/reward_penalty.go
      search: func AttestationsDelta(
  spec: |
    def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
        """
        Return attestation reward/penalty deltas for each validator.
        """
        source_rewards, source_penalties = get_source_deltas(state)
        target_rewards, target_penalties = get_target_deltas(state)
        head_rewards, head_penalties = get_head_deltas(state)
        inclusion_delay_rewards, _ = get_inclusion_delay_deltas(state)
        _, inactivity_penalties = get_inactivity_penalty_deltas(state)
        rewards = [
            source_rewards[i] + target_rewards[i] + head_rewards[i] + inclusion_delay_rewards[i]
            for i in range(len(state.validators))
        ]
        penalties = [
            source_penalties[i] + target_penalties[i] + head_penalties[i] + inactivity_penalties[i]
            for i in range(len(state.validators))
        ]
        return rewards, penalties
- name: get_attestation_participation_flag_indices#altair
  sources:
    - file: beacon-chain/core/altair/attestation.go
      search: func AttestationParticipationFlagIndices(
  spec: |
    def get_attestation_participation_flag_indices(
        state: BeaconState, data: AttestationData, inclusion_delay: uint64
    ) -> Sequence[int]:
        """
        Return the flag indices that are satisfied by an attestation.
        """
        if data.target.epoch == get_current_epoch(state):
            justified_checkpoint = state.current_justified_checkpoint
        else:
            justified_checkpoint = state.previous_justified_checkpoint
        # Matching roots
        is_matching_source = data.source == justified_checkpoint
        is_matching_target = is_matching_source and data.target.root == get_block_root(
            state, data.target.epoch
        )
        is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(
            state, data.slot
        )
        assert is_matching_source
        participation_flag_indices = []
        if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
            participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
        if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH:
            participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
        if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
            participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
        return participation_flag_indices
- name: get_attestation_participation_flag_indices#deneb
  sources:
    - file: beacon-chain/core/altair/attestation.go
      search: func AttestationParticipationFlagIndices(
  spec: |
    def get_attestation_participation_flag_indices(
        state: BeaconState, data: AttestationData, inclusion_delay: uint64
    ) -> Sequence[int]:
        """
        Return the flag indices that are satisfied by an attestation.
        """
        if data.target.epoch == get_current_epoch(state):
            justified_checkpoint = state.current_justified_checkpoint
        else:
            justified_checkpoint = state.previous_justified_checkpoint
        # Matching roots
        is_matching_source = data.source == justified_checkpoint
        is_matching_target = is_matching_source and data.target.root == get_block_root(
            state, data.target.epoch
        )
        is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(
            state, data.slot
        )
        assert is_matching_source
        participation_flag_indices = []
        if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
            participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
        # [Modified in Deneb:EIP7045]
        if is_matching_target:
            participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
        if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
            participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
        return participation_flag_indices
- name: get_attestation_signature
  sources:
    - file: beacon-chain/core/blocks/signature.go
      search: func AttestationSignatureBatch(
  spec: |
    def get_attestation_signature(
        state: BeaconState, attestation_data: AttestationData, privkey: int
    ) -> BLSSignature:
        domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
        signing_root = compute_signing_root(attestation_data, domain)
        return bls.Sign(privkey, signing_root)
- name: get_attesting_balance
  sources: []
  spec: |
    def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei:
        """
        Return the combined effective balance of the set of unslashed validators participating in ``attestations``.
        Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
        """
        return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
- name: get_attesting_indices#phase0
  sources:
    - file: proto/prysm/v1alpha1/attestation/attestation_utils.go
      search: func AttestingIndices(
  spec: |
    def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[ValidatorIndex]:
        """
        Return the set of attesting indices corresponding to ``data`` and ``bits``.
        """
        committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
        return set(index for i, index in enumerate(committee) if attestation.aggregation_bits[i])
- name: get_attesting_indices#electra
  sources:
    - file: proto/prysm/v1alpha1/attestation/attestation_utils.go
      search: func AttestingIndices(
  spec: |
    def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[ValidatorIndex]:
        """
        Return the set of attesting indices corresponding to ``aggregation_bits`` and ``committee_bits``.
        """
        output: Set[ValidatorIndex] = set()
        committee_indices = get_committee_indices(attestation.committee_bits)
        committee_offset = 0
        for committee_index in committee_indices:
            committee = get_beacon_committee(state, attestation.data.slot, committee_index)
            committee_attesters = set(
                attester_index
                for i, attester_index in enumerate(committee)
                if attestation.aggregation_bits[committee_offset + i]
            )
            output = output.union(committee_attesters)
            committee_offset += len(committee)
        return output
- name: get_balance_churn_limit
  sources:
    - file: beacon-chain/core/helpers/validator_churn.go
      search: func BalanceChurnLimit(
  spec: |
    def get_balance_churn_limit(state: BeaconState) -> Gwei:
        """
        Return the churn limit for the current epoch.
""" churn = max( MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA, get_total_active_balance(state) // CHURN_LIMIT_QUOTIENT ) return churn - churn % EFFECTIVE_BALANCE_INCREMENT - name: get_base_reward#phase0 sources: [] spec: | def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: total_balance = get_total_active_balance(state) effective_balance = state.validators[index].effective_balance return Gwei( effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH ) - name: get_base_reward#altair sources: - file: beacon-chain/core/altair/reward.go search: func BaseReward( spec: | def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: """ Return the base reward for the validator defined by ``index`` with respect to the current ``state``. """ increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT return Gwei(increments * get_base_reward_per_increment(state)) - name: get_base_reward_per_increment sources: - file: beacon-chain/core/altair/reward.go search: func BaseRewardPerIncrement( spec: | def get_base_reward_per_increment(state: BeaconState) -> Gwei: return Gwei( EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)) ) - name: get_beacon_committee sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func BeaconCommittee( spec: | def get_beacon_committee( state: BeaconState, slot: Slot, index: CommitteeIndex ) -> Sequence[ValidatorIndex]: """ Return the beacon committee at ``slot`` for ``index``. """ epoch = compute_epoch_at_slot(slot) committees_per_slot = get_committee_count_per_slot(state, epoch) return compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, count=committees_per_slot * SLOTS_PER_EPOCH, ) - name: get_beacon_proposer_index#phase0 sources: - file: beacon-chain/core/helpers/validators.go search: func BeaconProposerIndex( spec: | def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: """ Return the beacon proposer index at the current slot. """ epoch = get_current_epoch(state) seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot)) indices = get_active_validator_indices(state, epoch) return compute_proposer_index(state, indices, seed) - name: get_beacon_proposer_index#fulu sources: - file: beacon-chain/core/helpers/validators.go search: func BeaconProposerIndex( - file: beacon-chain/core/helpers/validators.go search: func beaconProposerIndexAtSlotFulu( spec: | def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: """ Return the beacon proposer index at the current slot. """ return state.proposer_lookahead[state.slot % SLOTS_PER_EPOCH] - name: get_beacon_proposer_indices sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func PrecomputeProposerIndices( spec: | def get_beacon_proposer_indices( state: BeaconState, epoch: Epoch ) -> Vector[ValidatorIndex, SLOTS_PER_EPOCH]: """ Return the proposer indices for the given ``epoch``. """ indices = get_active_validator_indices(state, epoch) seed = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) return compute_proposer_indices(state, epoch, seed, indices) - name: get_blob_parameters sources: [] spec: | def get_blob_parameters(epoch: Epoch) -> BlobParameters: """ Return the blob parameters at a given epoch. 
""" for entry in sorted(BLOB_SCHEDULE, key=lambda e: e["EPOCH"], reverse=True): if epoch >= entry["EPOCH"]: return BlobParameters(entry["EPOCH"], entry["MAX_BLOBS_PER_BLOCK"]) return BlobParameters(ELECTRA_FORK_EPOCH, MAX_BLOBS_PER_BLOCK_ELECTRA) - name: get_blob_sidecars sources: - file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deneb.go search: func BuildBlobSidecars( spec: | def get_blob_sidecars( signed_block: SignedBeaconBlock, blobs: Sequence[Blob], blob_kzg_proofs: Sequence[KZGProof] ) -> Sequence[BlobSidecar]: block = signed_block.message signed_block_header = compute_signed_block_header(signed_block) return [ BlobSidecar( index=index, blob=blob, kzg_commitment=block.body.blob_kzg_commitments[index], kzg_proof=blob_kzg_proofs[index], signed_block_header=signed_block_header, kzg_commitment_inclusion_proof=compute_merkle_proof( block.body, get_generalized_index(BeaconBlockBody, "blob_kzg_commitments", index), ), ) for index, blob in enumerate(blobs) ] - name: get_block_root sources: - file: beacon-chain/core/helpers/block.go search: func BlockRoot( spec: | def get_block_root(state: BeaconState, epoch: Epoch) -> Root: """ Return the block root at the start of a recent ``epoch``. """ return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) - name: get_block_root_at_slot sources: - file: beacon-chain/core/helpers/block.go search: func BlockRootAtSlot( spec: | def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root: """ Return the block root at a recent ``slot``. """ assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] - name: get_block_signature sources: - file: testing/util/helpers.go search: func BlockSignature( spec: | def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature: domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(block.slot)) signing_root = compute_signing_root(block, domain) return bls.Sign(privkey, signing_root) - name: get_checkpoint_block sources: [] spec: | def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root: """ Compute the checkpoint block for epoch ``epoch`` in the chain of block ``root`` """ epoch_first_slot = compute_start_slot_at_epoch(epoch) return get_ancestor(store, root, epoch_first_slot) - name: get_committee_assignment sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func CommitteeAssignments( spec: | def get_committee_assignment( state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex ) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]: """ Return the committee assignment in the ``epoch`` for ``validator_index``. ``assignment`` returned is a tuple of the following form: * ``assignment[0]`` is the list of validators in the committee * ``assignment[1]`` is the index to which the committee is assigned * ``assignment[2]`` is the slot at which the committee is assigned Return None if no assignment. 
""" next_epoch = Epoch(get_current_epoch(state) + 1) assert epoch <= next_epoch start_slot = compute_start_slot_at_epoch(epoch) committee_count_per_slot = get_committee_count_per_slot(state, epoch) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): for index in range(committee_count_per_slot): committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index)) if validator_index in committee: return committee, CommitteeIndex(index), Slot(slot) return None - name: get_committee_count_per_slot sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func SlotCommitteeCount( spec: | def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: """ Return the number of committees in each slot for the given ``epoch``. """ return max( uint64(1), min( MAX_COMMITTEES_PER_SLOT, uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, ), ) - name: get_committee_indices sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func CommitteeIndices( spec: | def get_committee_indices(committee_bits: Bitvector) -> Sequence[CommitteeIndex]: return [CommitteeIndex(index) for index, bit in enumerate(committee_bits) if bit] - name: get_consolidation_churn_limit sources: - file: beacon-chain/core/helpers/validator_churn.go search: func ConsolidationChurnLimit( spec: | def get_consolidation_churn_limit(state: BeaconState) -> Gwei: return get_balance_churn_limit(state) - get_activation_exit_churn_limit(state) - name: get_contribution_and_proof sources: [] spec: | def get_contribution_and_proof( state: BeaconState, aggregator_index: ValidatorIndex, contribution: SyncCommitteeContribution, privkey: int, ) -> ContributionAndProof: selection_proof = get_sync_committee_selection_proof( state, contribution.slot, contribution.subcommittee_index, privkey, ) return ContributionAndProof( aggregator_index=aggregator_index, contribution=contribution, selection_proof=selection_proof, ) - name: get_contribution_and_proof_signature sources: - file: validator/client/sync_committee.go search: func (v *validator) signContributionAndProof( spec: | def get_contribution_and_proof_signature( state: BeaconState, contribution_and_proof: ContributionAndProof, privkey: int ) -> BLSSignature: contribution = contribution_and_proof.contribution domain = get_domain( state, DOMAIN_CONTRIBUTION_AND_PROOF, compute_epoch_at_slot(contribution.slot) ) signing_root = compute_signing_root(contribution_and_proof, domain) return bls.Sign(privkey, signing_root) - name: get_current_epoch sources: - file: beacon-chain/core/time/slot_epoch.go search: func CurrentEpoch( spec: | def get_current_epoch(state: BeaconState) -> Epoch: """ Return the current epoch. 
""" return compute_epoch_at_slot(state.slot) - name: get_current_slot sources: - file: time/slots/slottime.go search: func CurrentSlot( spec: | def get_current_slot(store: Store) -> Slot: return Slot(GENESIS_SLOT + get_slots_since_genesis(store)) - name: get_current_store_epoch sources: [] spec: | def get_current_store_epoch(store: Store) -> Epoch: return compute_epoch_at_slot(get_current_slot(store)) - name: get_custody_groups sources: - file: beacon-chain/core/peerdas/das_core.go search: func CustodyGroups( spec: | def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence[CustodyIndex]: assert custody_group_count <= NUMBER_OF_CUSTODY_GROUPS # Skip computation if all groups are custodied if custody_group_count == NUMBER_OF_CUSTODY_GROUPS: return [CustodyIndex(i) for i in range(NUMBER_OF_CUSTODY_GROUPS)] current_id = uint256(node_id) custody_groups: List[CustodyIndex] = [] while len(custody_groups) < custody_group_count: custody_group = CustodyIndex( bytes_to_uint64(hash(uint_to_bytes(current_id))[0:8]) % NUMBER_OF_CUSTODY_GROUPS ) if custody_group not in custody_groups: custody_groups.append(custody_group) if current_id == UINT256_MAX: # Overflow prevention current_id = uint256(0) else: current_id += 1 assert len(custody_groups) == len(set(custody_groups)) return sorted(custody_groups) - name: get_data_column_sidecars sources: - file: beacon-chain/core/peerdas/validator.go search: func DataColumnSidecars( spec: | def get_data_column_sidecars( signed_block_header: SignedBeaconBlockHeader, kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK], kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH], cells_and_kzg_proofs: Sequence[ Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]] ], ) -> Sequence[DataColumnSidecar]: """ Given a signed block header and the commitments, inclusion proof, cells/proofs associated with each blob in the block, assemble the sidecars which can be distributed to peers. """ assert len(cells_and_kzg_proofs) == len(kzg_commitments) sidecars = [] for column_index in range(NUMBER_OF_COLUMNS): column_cells, column_proofs = [], [] for cells, proofs in cells_and_kzg_proofs: column_cells.append(cells[column_index]) column_proofs.append(proofs[column_index]) sidecars.append( DataColumnSidecar( index=column_index, column=column_cells, kzg_commitments=kzg_commitments, kzg_proofs=column_proofs, signed_block_header=signed_block_header, kzg_commitments_inclusion_proof=kzg_commitments_inclusion_proof, ) ) return sidecars - name: get_domain sources: - file: beacon-chain/core/signing/domain.go search: func Domain( spec: | def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch = None) -> Domain: """ Return the signature domain (fork version concatenated with domain type) of a message. 
""" epoch = get_current_epoch(state) if epoch is None else epoch fork_version = ( state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version ) return compute_domain(domain_type, fork_version, state.genesis_validators_root) - name: get_eligible_validator_indices sources: [] spec: | def get_eligible_validator_indices(state: BeaconState) -> Sequence[ValidatorIndex]: previous_epoch = get_previous_epoch(state) return [ ValidatorIndex(index) for index, v in enumerate(state.validators) if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch) ] - name: get_epoch_signature sources: [] spec: | def get_epoch_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature: domain = get_domain(state, DOMAIN_RANDAO, compute_epoch_at_slot(block.slot)) signing_root = compute_signing_root(compute_epoch_at_slot(block.slot), domain) return bls.Sign(privkey, signing_root) - name: get_eth1_pending_deposit_count sources: - file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go search: func (vs *Server) deposits( spec: | def get_eth1_pending_deposit_count(state: BeaconState) -> uint64: eth1_deposit_index_limit = min( state.eth1_data.deposit_count, state.deposit_requests_start_index ) if state.eth1_deposit_index < eth1_deposit_index_limit: return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) else: return uint64(0) - name: get_eth1_vote#phase0 sources: [] spec: | def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data: period_start = voting_period_start_time(state) # `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height votes_to_consider = [ get_eth1_data(block) for block in eth1_chain if ( is_candidate_block(block, period_start) # Ensure cannot move back to earlier deposit contract states and get_eth1_data(block).deposit_count >= state.eth1_data.deposit_count ) ] # Valid votes already cast during this period valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider] # Default vote on latest eth1 block data in the period range unless eth1 chain is not live # Non-substantive casting for linter state_eth1_data: Eth1Data = state.eth1_data default_vote = ( votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data ) return max( valid_votes, # Tiebreak by smallest distance key=lambda v: ( valid_votes.count(v), -valid_votes.index(v), ), default=default_vote, ) - name: get_eth1_vote#electra sources: [] spec: | def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data: # [New in Electra:EIP6110] if state.eth1_deposit_index == state.deposit_requests_start_index: return state.eth1_data period_start = voting_period_start_time(state) # `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height votes_to_consider = [ get_eth1_data(block) for block in eth1_chain if ( is_candidate_block(block, period_start) # Ensure cannot move back to earlier deposit contract states and get_eth1_data(block).deposit_count >= state.eth1_data.deposit_count ) ] # Valid votes already cast during this period valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider] # Default vote on latest eth1 block data in the period range unless eth1 chain is not live # Non-substantive casting for linter state_eth1_data: Eth1Data = state.eth1_data default_vote = ( votes_to_consider[len(votes_to_consider) - 1] if 
any(votes_to_consider) else state_eth1_data ) return max( valid_votes, # Tiebreak by smallest distance key=lambda v: ( valid_votes.count(v), -valid_votes.index(v), ), default=default_vote, ) - name: get_execution_payload sources: [] spec: | def get_execution_payload( payload_id: Optional[PayloadId], execution_engine: ExecutionEngine ) -> ExecutionPayload: if payload_id is None: # Pre-merge, empty payload return ExecutionPayload() else: return execution_engine.get_payload(payload_id).execution_payload - name: get_execution_requests sources: - file: proto/engine/v1/electra.go search: func (ebe *ExecutionBundleElectra) GetDecodedExecutionRequests( spec: | def get_execution_requests(execution_requests_list: Sequence[bytes]) -> ExecutionRequests: deposits = [] withdrawals = [] consolidations = [] request_types = [ DEPOSIT_REQUEST_TYPE, WITHDRAWAL_REQUEST_TYPE, CONSOLIDATION_REQUEST_TYPE, ] prev_request_type = None for request in execution_requests_list: request_type, request_data = request[0:1], request[1:] # Check that the request type is valid assert request_type in request_types # Check that the request data is not empty assert len(request_data) != 0 # Check that requests are in strictly ascending order # Each successive type must be greater than the last with no duplicates assert prev_request_type is None or prev_request_type < request_type prev_request_type = request_type if request_type == DEPOSIT_REQUEST_TYPE: deposits = ssz_deserialize( List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD], request_data ) elif request_type == WITHDRAWAL_REQUEST_TYPE: withdrawals = ssz_deserialize( List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD], request_data ) elif request_type == CONSOLIDATION_REQUEST_TYPE: consolidations = ssz_deserialize( List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD], request_data ) return ExecutionRequests( deposits=deposits, withdrawals=withdrawals, consolidations=consolidations, ) - name: get_execution_requests_list sources: - file: proto/engine/v1/electra.go search: func EncodeExecutionRequests( spec: | def get_execution_requests_list(execution_requests: ExecutionRequests) -> Sequence[bytes]: requests = [ (DEPOSIT_REQUEST_TYPE, execution_requests.deposits), (WITHDRAWAL_REQUEST_TYPE, execution_requests.withdrawals), (CONSOLIDATION_REQUEST_TYPE, execution_requests.consolidations), ] return [ request_type + ssz_serialize(request_data) for request_type, request_data in requests if len(request_data) != 0 ] - name: get_expected_withdrawals#capella sources: - file: beacon-chain/state/state-native/getters_withdrawal.go search: func (b *BeaconState) ExpectedWithdrawals( spec: | def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]: epoch = get_current_epoch(state) withdrawal_index = state.next_withdrawal_index validator_index = state.next_withdrawal_validator_index withdrawals: List[Withdrawal] = [] bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) for _ in range(bound): validator = state.validators[validator_index] balance = state.balances[validator_index] if is_fully_withdrawable_validator(validator, balance, epoch): withdrawals.append( Withdrawal( index=withdrawal_index, validator_index=validator_index, address=ExecutionAddress(validator.withdrawal_credentials[12:]), amount=balance, ) ) withdrawal_index += WithdrawalIndex(1) elif is_partially_withdrawable_validator(validator, balance): withdrawals.append( Withdrawal( index=withdrawal_index, validator_index=validator_index, 
address=ExecutionAddress(validator.withdrawal_credentials[12:]), amount=balance - MAX_EFFECTIVE_BALANCE, ) ) withdrawal_index += WithdrawalIndex(1) if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: break validator_index = ValidatorIndex((validator_index + 1) % len(state.validators)) return withdrawals - name: get_expected_withdrawals#electra sources: - file: beacon-chain/state/state-native/getters_withdrawal.go search: func (b *BeaconState) ExpectedWithdrawals( spec: | def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]: epoch = get_current_epoch(state) withdrawal_index = state.next_withdrawal_index validator_index = state.next_withdrawal_validator_index withdrawals: List[Withdrawal] = [] processed_partial_withdrawals_count = 0 # [New in Electra:EIP7251] # Consume pending partial withdrawals for withdrawal in state.pending_partial_withdrawals: if ( withdrawal.withdrawable_epoch > epoch or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP ): break validator = state.validators[withdrawal.validator_index] has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE total_withdrawn = sum( w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index ) balance = state.balances[withdrawal.validator_index] - total_withdrawn has_excess_balance = balance > MIN_ACTIVATION_BALANCE if ( validator.exit_epoch == FAR_FUTURE_EPOCH and has_sufficient_effective_balance and has_excess_balance ): withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount) withdrawals.append( Withdrawal( index=withdrawal_index, validator_index=withdrawal.validator_index, address=ExecutionAddress(validator.withdrawal_credentials[12:]), amount=withdrawable_balance, ) ) withdrawal_index += WithdrawalIndex(1) processed_partial_withdrawals_count += 1 # Sweep for remaining. bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) for _ in range(bound): validator = state.validators[validator_index] # [Modified in Electra:EIP7251] total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == validator_index) balance = state.balances[validator_index] - total_withdrawn if is_fully_withdrawable_validator(validator, balance, epoch): withdrawals.append( Withdrawal( index=withdrawal_index, validator_index=validator_index, address=ExecutionAddress(validator.withdrawal_credentials[12:]), amount=balance, ) ) withdrawal_index += WithdrawalIndex(1) elif is_partially_withdrawable_validator(validator, balance): withdrawals.append( Withdrawal( index=withdrawal_index, validator_index=validator_index, address=ExecutionAddress(validator.withdrawal_credentials[12:]), # [Modified in Electra:EIP7251] amount=balance - get_max_effective_balance(validator), ) ) withdrawal_index += WithdrawalIndex(1) if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: break validator_index = ValidatorIndex((validator_index + 1) % len(state.validators)) return withdrawals, processed_partial_withdrawals_count - name: get_filtered_block_tree sources: [] spec: | def get_filtered_block_tree(store: Store) -> Dict[Root, BeaconBlock]: """ Retrieve a filtered block tree from ``store``, only returning branches whose leaf state's justified/finalized info agrees with that in ``store``. 
""" base = store.justified_checkpoint.root blocks: Dict[Root, BeaconBlock] = {} filter_block_tree(store, base, blocks) return blocks - name: get_finality_delay sources: - file: beacon-chain/core/helpers/rewards_penalties.go search: func FinalityDelay( spec: | def get_finality_delay(state: BeaconState) -> uint64: return get_previous_epoch(state) - state.finalized_checkpoint.epoch - name: get_flag_index_deltas sources: - file: beacon-chain/core/altair/epoch_precompute.go search: func AttestationsDelta( spec: | def get_flag_index_deltas( state: BeaconState, flag_index: int ) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return the deltas for a given ``flag_index`` by scanning through the participation flags. """ rewards = [Gwei(0)] * len(state.validators) penalties = [Gwei(0)] * len(state.validators) previous_epoch = get_previous_epoch(state) unslashed_participating_indices = get_unslashed_participating_indices( state, flag_index, previous_epoch ) weight = PARTICIPATION_FLAG_WEIGHTS[flag_index] unslashed_participating_balance = get_total_balance(state, unslashed_participating_indices) unslashed_participating_increments = ( unslashed_participating_balance // EFFECTIVE_BALANCE_INCREMENT ) active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT for index in get_eligible_validator_indices(state): base_reward = get_base_reward(state, index) if index in unslashed_participating_indices: if not is_in_inactivity_leak(state): reward_numerator = base_reward * weight * unslashed_participating_increments rewards[index] += Gwei(reward_numerator // (active_increments * WEIGHT_DENOMINATOR)) elif flag_index != TIMELY_HEAD_FLAG_INDEX: penalties[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR) return rewards, penalties - name: get_forkchoice_store sources: [] spec: | def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store: assert anchor_block.state_root == hash_tree_root(anchor_state) anchor_root = hash_tree_root(anchor_block) anchor_epoch = get_current_epoch(anchor_state) justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root) proposer_boost_root = Root() return Store( time=uint64(anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot), genesis_time=anchor_state.genesis_time, justified_checkpoint=justified_checkpoint, finalized_checkpoint=finalized_checkpoint, unrealized_justified_checkpoint=justified_checkpoint, unrealized_finalized_checkpoint=finalized_checkpoint, proposer_boost_root=proposer_boost_root, equivocating_indices=set(), blocks={anchor_root: copy(anchor_block)}, block_states={anchor_root: copy(anchor_state)}, checkpoint_states={justified_checkpoint: copy(anchor_state)}, unrealized_justifications={anchor_root: justified_checkpoint}, ) - name: get_head sources: - file: beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go search: func (f *ForkChoice) Head( spec: | def get_head(store: Store) -> Root: # Get filtered block tree that only includes viable branches blocks = get_filtered_block_tree(store) # Execute the LMD-GHOST fork choice head = store.justified_checkpoint.root while True: children = [root for root in blocks.keys() if blocks[root].parent_root == head] if len(children) == 0: return head # Sort by latest attesting balance with ties broken lexicographically # Ties broken by favoring block with lexicographically higher root head = max(children, key=lambda root: (get_weight(store, root), root)) - name: get_head_deltas sources: 
- file: beacon-chain/core/epoch/precompute/reward_penalty.go search: func AttestationsDelta( spec: | def get_head_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attester micro-rewards/penalties for head-vote for each validator. """ matching_head_attestations = get_matching_head_attestations(state, get_previous_epoch(state)) return get_attestation_component_deltas(state, matching_head_attestations) - name: get_inactivity_penalty_deltas#phase0 sources: - file: beacon-chain/core/epoch/precompute/reward_penalty.go search: func AttestationsDelta( spec: | def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return inactivity reward/penalty deltas for each validator. """ penalties = [Gwei(0) for _ in range(len(state.validators))] if is_in_inactivity_leak(state): matching_target_attestations = get_matching_target_attestations( state, get_previous_epoch(state) ) matching_target_attesting_indices = get_unslashed_attesting_indices( state, matching_target_attestations ) for index in get_eligible_validator_indices(state): # If validator is performing optimally this cancels all rewards for a neutral balance base_reward = get_base_reward(state, index) penalties[index] += Gwei( BASE_REWARDS_PER_EPOCH * base_reward - get_proposer_reward(state, index) ) if index not in matching_target_attesting_indices: effective_balance = state.validators[index].effective_balance penalties[index] += Gwei( effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT ) # No rewards associated with inactivity penalties rewards = [Gwei(0) for _ in range(len(state.validators))] return rewards, penalties - name: get_inactivity_penalty_deltas#altair sources: - file: beacon-chain/core/altair/epoch_precompute.go search: func AttestationsDelta( spec: | def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return the inactivity penalty deltas by considering timely target participation flags and inactivity scores. """ rewards = [Gwei(0) for _ in range(len(state.validators))] penalties = [Gwei(0) for _ in range(len(state.validators))] previous_epoch = get_previous_epoch(state) matching_target_indices = get_unslashed_participating_indices( state, TIMELY_TARGET_FLAG_INDEX, previous_epoch ) for index in get_eligible_validator_indices(state): if index not in matching_target_indices: penalty_numerator = ( state.validators[index].effective_balance * state.inactivity_scores[index] ) penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR penalties[index] += Gwei(penalty_numerator // penalty_denominator) return rewards, penalties - name: get_inactivity_penalty_deltas#bellatrix sources: - file: beacon-chain/core/altair/epoch_precompute.go search: func AttestationsDelta( spec: | def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return the inactivity penalty deltas by considering timely target participation flags and inactivity scores. 
""" rewards = [Gwei(0) for _ in range(len(state.validators))] penalties = [Gwei(0) for _ in range(len(state.validators))] previous_epoch = get_previous_epoch(state) matching_target_indices = get_unslashed_participating_indices( state, TIMELY_TARGET_FLAG_INDEX, previous_epoch ) for index in get_eligible_validator_indices(state): if index not in matching_target_indices: penalty_numerator = ( state.validators[index].effective_balance * state.inactivity_scores[index] ) # [Modified in Bellatrix] penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_BELLATRIX penalties[index] += Gwei(penalty_numerator // penalty_denominator) return rewards, penalties - name: get_inclusion_delay_deltas sources: - file: beacon-chain/core/epoch/precompute/reward_penalty.go search: func AttestationsDelta( spec: | def get_inclusion_delay_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return proposer and inclusion delay micro-rewards/penalties for each validator. """ rewards = [Gwei(0) for _ in range(len(state.validators))] matching_source_attestations = get_matching_source_attestations( state, get_previous_epoch(state) ) for index in get_unslashed_attesting_indices(state, matching_source_attestations): attestation = min( [a for a in matching_source_attestations if index in get_attesting_indices(state, a)], key=lambda a: a.inclusion_delay, ) rewards[attestation.proposer_index] += get_proposer_reward(state, index) max_attester_reward = Gwei( get_base_reward(state, index) - get_proposer_reward(state, index) ) rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay) # No penalties associated with inclusion delay penalties = [Gwei(0) for _ in range(len(state.validators))] return rewards, penalties - name: get_index_for_new_validator sources: [] spec: | def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex: return ValidatorIndex(len(state.validators)) - name: get_indexed_attestation sources: - file: proto/prysm/v1alpha1/attestation/attestation_utils.go search: func ConvertToIndexed( spec: | def get_indexed_attestation(state: BeaconState, attestation: Attestation) -> IndexedAttestation: """ Return the indexed attestation corresponding to ``attestation``. 
""" attesting_indices = get_attesting_indices(state, attestation) return IndexedAttestation( attesting_indices=sorted(attesting_indices), data=attestation.data, signature=attestation.signature, ) - name: get_lc_execution_root#capella sources: [] spec: | def get_lc_execution_root(header: LightClientHeader) -> Root: epoch = compute_epoch_at_slot(header.beacon.slot) if epoch >= CAPELLA_FORK_EPOCH: return hash_tree_root(header.execution) return Root() - name: get_lc_execution_root#deneb sources: [] spec: | def get_lc_execution_root(header: LightClientHeader) -> Root: epoch = compute_epoch_at_slot(header.beacon.slot) # [New in Deneb] if epoch >= DENEB_FORK_EPOCH: return hash_tree_root(header.execution) # [Modified in Deneb] if epoch >= CAPELLA_FORK_EPOCH: execution_header = capella.ExecutionPayloadHeader( parent_hash=header.execution.parent_hash, fee_recipient=header.execution.fee_recipient, state_root=header.execution.state_root, receipts_root=header.execution.receipts_root, logs_bloom=header.execution.logs_bloom, prev_randao=header.execution.prev_randao, block_number=header.execution.block_number, gas_limit=header.execution.gas_limit, gas_used=header.execution.gas_used, timestamp=header.execution.timestamp, extra_data=header.execution.extra_data, base_fee_per_gas=header.execution.base_fee_per_gas, block_hash=header.execution.block_hash, transactions_root=header.execution.transactions_root, withdrawals_root=header.execution.withdrawals_root, ) return hash_tree_root(execution_header) return Root() - name: get_lc_execution_root#electra sources: [] spec: | def get_lc_execution_root(header: LightClientHeader) -> Root: epoch = compute_epoch_at_slot(header.beacon.slot) # [New in Electra] if epoch >= ELECTRA_FORK_EPOCH: return hash_tree_root(header.execution) # [Modified in Electra] if epoch >= DENEB_FORK_EPOCH: execution_header = deneb.ExecutionPayloadHeader( parent_hash=header.execution.parent_hash, fee_recipient=header.execution.fee_recipient, state_root=header.execution.state_root, receipts_root=header.execution.receipts_root, logs_bloom=header.execution.logs_bloom, prev_randao=header.execution.prev_randao, block_number=header.execution.block_number, gas_limit=header.execution.gas_limit, gas_used=header.execution.gas_used, timestamp=header.execution.timestamp, extra_data=header.execution.extra_data, base_fee_per_gas=header.execution.base_fee_per_gas, block_hash=header.execution.block_hash, transactions_root=header.execution.transactions_root, withdrawals_root=header.execution.withdrawals_root, blob_gas_used=header.execution.blob_gas_used, excess_blob_gas=header.execution.excess_blob_gas, ) return hash_tree_root(execution_header) if epoch >= CAPELLA_FORK_EPOCH: execution_header = capella.ExecutionPayloadHeader( parent_hash=header.execution.parent_hash, fee_recipient=header.execution.fee_recipient, state_root=header.execution.state_root, receipts_root=header.execution.receipts_root, logs_bloom=header.execution.logs_bloom, prev_randao=header.execution.prev_randao, block_number=header.execution.block_number, gas_limit=header.execution.gas_limit, gas_used=header.execution.gas_used, timestamp=header.execution.timestamp, extra_data=header.execution.extra_data, base_fee_per_gas=header.execution.base_fee_per_gas, block_hash=header.execution.block_hash, transactions_root=header.execution.transactions_root, withdrawals_root=header.execution.withdrawals_root, ) return hash_tree_root(execution_header) return Root() - name: get_matching_head_attestations sources: [] spec: | def get_matching_head_attestations( 
state: BeaconState, epoch: Epoch ) -> Sequence[PendingAttestation]: return [ a for a in get_matching_target_attestations(state, epoch) if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot) ] - name: get_matching_source_attestations sources: [] spec: | def get_matching_source_attestations( state: BeaconState, epoch: Epoch ) -> Sequence[PendingAttestation]: assert epoch in (get_previous_epoch(state), get_current_epoch(state)) return ( state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations ) - name: get_matching_target_attestations sources: [] spec: | def get_matching_target_attestations( state: BeaconState, epoch: Epoch ) -> Sequence[PendingAttestation]: return [ a for a in get_matching_source_attestations(state, epoch) if a.data.target.root == get_block_root(state, epoch) ] - name: get_max_effective_balance sources: - file: beacon-chain/core/helpers/validators.go search: func ValidatorMaxEffectiveBalance( spec: | def get_max_effective_balance(validator: Validator) -> Gwei: """ Get max effective balance for ``validator``. """ if has_compounding_withdrawal_credential(validator): return MAX_EFFECTIVE_BALANCE_ELECTRA else: return MIN_ACTIVATION_BALANCE - name: get_next_sync_committee sources: - file: beacon-chain/core/altair/sync_committee.go search: func NextSyncCommittee( spec: | def get_next_sync_committee(state: BeaconState) -> SyncCommittee: """ Return the next sync committee, with possible pubkey duplicates. """ indices = get_next_sync_committee_indices(state) pubkeys = [state.validators[index].pubkey for index in indices] aggregate_pubkey = eth_aggregate_pubkeys(pubkeys) return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey) - name: get_next_sync_committee_indices#altair sources: - file: beacon-chain/core/altair/sync_committee.go search: func NextSyncCommitteeIndices( spec: | def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: """ Return the sync committee indices, with possible duplicates, for the next sync committee. """ epoch = Epoch(get_current_epoch(state) + 1) MAX_RANDOM_BYTE = 2**8 - 1 active_validator_indices = get_active_validator_indices(state, epoch) active_validator_count = uint64(len(active_validator_indices)) seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) i = 0 sync_committee_indices: List[ValidatorIndex] = [] while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE: shuffled_index = compute_shuffled_index( uint64(i % active_validator_count), active_validator_count, seed ) candidate_index = active_validator_indices[shuffled_index] random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] effective_balance = state.validators[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: sync_committee_indices.append(candidate_index) i += 1 return sync_committee_indices - name: get_next_sync_committee_indices#electra sources: - file: beacon-chain/core/altair/sync_committee.go search: func NextSyncCommitteeIndices( - file: beacon-chain/core/altair/sync_committee.go search: if s.Version() >= version.Electra { spec: | def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: """ Return the sync committee indices, with possible duplicates, for the next sync committee. 
""" epoch = Epoch(get_current_epoch(state) + 1) # [Modified in Electra] MAX_RANDOM_VALUE = 2**16 - 1 active_validator_indices = get_active_validator_indices(state, epoch) active_validator_count = uint64(len(active_validator_indices)) seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) i = uint64(0) sync_committee_indices: List[ValidatorIndex] = [] while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE: shuffled_index = compute_shuffled_index( uint64(i % active_validator_count), active_validator_count, seed ) candidate_index = active_validator_indices[shuffled_index] # [Modified in Electra] random_bytes = hash(seed + uint_to_bytes(i // 16)) offset = i % 16 * 2 random_value = bytes_to_uint64(random_bytes[offset : offset + 2]) effective_balance = state.validators[candidate_index].effective_balance # [Modified in Electra:EIP7251] if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value: sync_committee_indices.append(candidate_index) i += 1 return sync_committee_indices - name: get_pending_balance_to_withdraw sources: - file: beacon-chain/state/state-native/getters_validator.go search: func (b *BeaconState) PendingBalanceToWithdraw( spec: | def get_pending_balance_to_withdraw(state: BeaconState, validator_index: ValidatorIndex) -> Gwei: return sum( withdrawal.amount for withdrawal in state.pending_partial_withdrawals if withdrawal.validator_index == validator_index ) - name: get_pow_block_at_terminal_total_difficulty sources: - file: beacon-chain/execution/engine_client.go search: func (s *Service) GetTerminalBlockHash( spec: | def get_pow_block_at_terminal_total_difficulty( pow_chain: Dict[Hash32, PowBlock], ) -> Optional[PowBlock]: # `pow_chain` abstractly represents all blocks in the PoW chain for block in pow_chain.values(): block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY if block_reached_ttd: # If genesis block, no parent exists so reaching TTD alone qualifies as valid terminal block if block.parent_hash == Hash32(): return block parent = pow_chain[block.parent_hash] parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY if not parent_reached_ttd: return block return None - name: get_previous_epoch sources: - file: beacon-chain/core/time/slot_epoch.go search: func PrevEpoch( spec: | def get_previous_epoch(state: BeaconState) -> Epoch: """` Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``). """ current_epoch = get_current_epoch(state) return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1) - name: get_proposer_head sources: [] spec: | def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root: head_block = store.blocks[head_root] parent_root = head_block.parent_root parent_block = store.blocks[parent_root] # Only re-org the head block if it arrived later than the attestation deadline. head_late = is_head_late(store, head_root) # Do not re-org on an epoch boundary where the proposer shuffling could change. shuffling_stable = is_shuffling_stable(slot) # Ensure that the FFG information of the new head will be competitive with the current head. ffg_competitive = is_ffg_competitive(store, head_root, parent_root) # Do not re-org if the chain is not finalizing with acceptable frequency. finalization_ok = is_finalization_ok(store, slot) # Only re-org if we are proposing on-time. proposing_on_time = is_proposing_on_time(store) # Only re-org a single slot at most. 
parent_slot_ok = parent_block.slot + 1 == head_block.slot current_time_ok = head_block.slot + 1 == slot single_slot_reorg = parent_slot_ok and current_time_ok # Check that the head has few enough votes to be overpowered by our proposer boost. assert store.proposer_boost_root != head_root # ensure boost has worn off head_weak = is_head_weak(store, head_root) # Check that the missing votes are assigned to the parent and not being hoarded. parent_strong = is_parent_strong(store, parent_root) if all( [ head_late, shuffling_stable, ffg_competitive, finalization_ok, proposing_on_time, single_slot_reorg, head_weak, parent_strong, ] ): # We can re-org the current head by building upon its parent block. return parent_root else: return head_root - name: get_proposer_reward sources: - file: beacon-chain/core/epoch/precompute/reward_penalty.go search: func ProposersDelta( spec: | def get_proposer_reward(state: BeaconState, attesting_index: ValidatorIndex) -> Gwei: return Gwei(get_base_reward(state, attesting_index) // PROPOSER_REWARD_QUOTIENT) - name: get_proposer_score sources: [] spec: | def get_proposer_score(store: Store) -> Gwei: justified_checkpoint_state = store.checkpoint_states[store.justified_checkpoint] committee_weight = get_total_active_balance(justified_checkpoint_state) // SLOTS_PER_EPOCH return (committee_weight * PROPOSER_SCORE_BOOST) // 100 - name: get_randao_mix sources: - file: beacon-chain/core/helpers/randao.go search: func RandaoMix( spec: | def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32: """ Return the randao mix at a recent ``epoch``. """ return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] - name: get_safety_threshold sources: [] spec: | def get_safety_threshold(store: LightClientStore) -> uint64: return ( max( store.previous_max_active_participants, store.current_max_active_participants, ) // 2 ) - name: get_seed sources: - file: beacon-chain/core/helpers/randao.go search: func Seed( spec: | def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32: """ Return the seed at ``epoch``. """ mix = get_randao_mix( state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1) ) # Avoid underflow return hash(domain_type + uint_to_bytes(epoch) + mix) - name: get_slot_component_duration_ms sources: [] spec: | def get_slot_component_duration_ms(basis_points: uint64) -> uint64: """ Calculate the duration of a slot component in milliseconds. """ return basis_points * SLOT_DURATION_MS // BASIS_POINTS - name: get_slot_signature sources: [] spec: | def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature: domain = get_domain(state, DOMAIN_SELECTION_PROOF, compute_epoch_at_slot(slot)) signing_root = compute_signing_root(slot, domain) return bls.Sign(privkey, signing_root) - name: get_slots_since_genesis sources: - file: time/slots/slottime.go search: func CurrentSlot( spec: | def get_slots_since_genesis(store: Store) -> int: return (store.time - store.genesis_time) // SECONDS_PER_SLOT - name: get_source_deltas sources: - file: beacon-chain/core/epoch/precompute/reward_penalty.go search: func AttestationsDelta( spec: | def get_source_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attester micro-rewards/penalties for source-vote for each validator. 
""" matching_source_attestations = get_matching_source_attestations( state, get_previous_epoch(state) ) return get_attestation_component_deltas(state, matching_source_attestations) - name: get_subtree_index sources: [] spec: | def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64: return uint64(generalized_index % 2 ** (floorlog2(generalized_index))) - name: get_sync_committee_message sources: [] spec: | def get_sync_committee_message( state: BeaconState, block_root: Root, validator_index: ValidatorIndex, privkey: int ) -> SyncCommitteeMessage: epoch = get_current_epoch(state) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch) signing_root = compute_signing_root(block_root, domain) signature = bls.Sign(privkey, signing_root) return SyncCommitteeMessage( slot=state.slot, beacon_block_root=block_root, validator_index=validator_index, signature=signature, ) - name: get_sync_committee_selection_proof sources: [] spec: | def get_sync_committee_selection_proof( state: BeaconState, slot: Slot, subcommittee_index: uint64, privkey: int ) -> BLSSignature: domain = get_domain(state, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, compute_epoch_at_slot(slot)) signing_data = SyncAggregatorSelectionData( slot=slot, subcommittee_index=subcommittee_index, ) signing_root = compute_signing_root(signing_data, domain) return bls.Sign(privkey, signing_root) - name: get_sync_subcommittee_pubkeys sources: - file: beacon-chain/core/altair/sync_committee.go search: func SyncSubCommitteePubkeys( spec: | def get_sync_subcommittee_pubkeys( state: BeaconState, subcommittee_index: uint64 ) -> Sequence[BLSPubkey]: # Committees assigned to `slot` sign for `slot - 1` # This creates the exceptional logic below when transitioning between sync committee periods next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period( next_slot_epoch ): sync_committee = state.current_sync_committee else: sync_committee = state.next_sync_committee # Return pubkeys for the subcommittee index sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT i = subcommittee_index * sync_subcommittee_size return sync_committee.pubkeys[i : i + sync_subcommittee_size] - name: get_target_deltas sources: - file: beacon-chain/core/epoch/precompute/reward_penalty.go search: func AttestationsDelta( spec: | def get_target_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attester micro-rewards/penalties for target-vote for each validator. """ matching_target_attestations = get_matching_target_attestations( state, get_previous_epoch(state) ) return get_attestation_component_deltas(state, matching_target_attestations) - name: get_terminal_pow_block sources: - file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go search: func (vs *Server) getTerminalBlockHashIfExists( spec: | def get_terminal_pow_block(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]: if TERMINAL_BLOCK_HASH != Hash32(): # Terminal block hash override takes precedence over terminal total difficulty if TERMINAL_BLOCK_HASH in pow_chain: return pow_chain[TERMINAL_BLOCK_HASH] else: return None return get_pow_block_at_terminal_total_difficulty(pow_chain) - name: get_total_active_balance sources: - file: beacon-chain/core/helpers/rewards_penalties.go search: func TotalActiveBalance( spec: | def get_total_active_balance(state: BeaconState) -> Gwei: """ Return the combined effective balance of the active validators. 
Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. """ return get_total_balance( state, set(get_active_validator_indices(state, get_current_epoch(state))) ) - name: get_total_balance sources: - file: beacon-chain/core/helpers/rewards_penalties.go search: func TotalBalance( spec: | def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei: """ Return the combined effective balance of the ``indices``. ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. Math safe up to ~10B ETH, after which this overflows uint64. """ return Gwei( max( EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices]), ) ) - name: get_unslashed_attesting_indices sources: [] spec: | def get_unslashed_attesting_indices( state: BeaconState, attestations: Sequence[PendingAttestation] ) -> Set[ValidatorIndex]: output: Set[ValidatorIndex] = set() for a in attestations: output = output.union(get_attesting_indices(state, a)) return set(filter(lambda index: not state.validators[index].slashed, output)) - name: get_unslashed_participating_indices sources: [] spec: | def get_unslashed_participating_indices( state: BeaconState, flag_index: int, epoch: Epoch ) -> Set[ValidatorIndex]: """ Return the set of validator indices that are both active and unslashed for the given ``flag_index`` and ``epoch``. """ assert epoch in (get_previous_epoch(state), get_current_epoch(state)) if epoch == get_current_epoch(state): epoch_participation = state.current_epoch_participation else: epoch_participation = state.previous_epoch_participation active_validator_indices = get_active_validator_indices(state, epoch) participating_indices = [ i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index) ] return set(filter(lambda index: not state.validators[index].slashed, participating_indices)) - name: get_validator_activation_churn_limit sources: - file: beacon-chain/core/helpers/validators.go search: func ValidatorActivationChurnLimitDeneb( spec: | def get_validator_activation_churn_limit(state: BeaconState) -> uint64: """ Return the validator activation churn limit for the current epoch. """ return min(MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT, get_validator_churn_limit(state)) - name: get_validator_churn_limit sources: - file: beacon-chain/core/helpers/validators.go search: func ValidatorExitChurnLimit( spec: | def get_validator_churn_limit(state: BeaconState) -> uint64: """ Return the validator churn limit for the current epoch. 
""" active_validator_indices = get_active_validator_indices(state, get_current_epoch(state)) return max( MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT ) - name: get_validator_from_deposit#phase0 sources: - file: beacon-chain/core/altair/deposit.go search: func GetValidatorFromDeposit( spec: | def get_validator_from_deposit( pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64 ) -> Validator: effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) return Validator( pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, effective_balance=effective_balance, slashed=False, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, ) - name: get_validator_from_deposit#electra sources: - file: beacon-chain/core/electra/deposits.go search: func GetValidatorFromDeposit( spec: | def get_validator_from_deposit( pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64 ) -> Validator: validator = Validator( pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, effective_balance=Gwei(0), slashed=False, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, ) # [Modified in Electra:EIP7251] max_effective_balance = get_max_effective_balance(validator) validator.effective_balance = min( amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance ) return validator - name: get_validators_custody_requirement sources: - file: beacon-chain/core/peerdas/validator.go search: func ValidatorsCustodyRequirement( spec: | def get_validators_custody_requirement( state: BeaconState, validator_indices: Sequence[ValidatorIndex] ) -> uint64: total_node_balance = sum( state.validators[index].effective_balance for index in validator_indices ) count = total_node_balance // BALANCE_PER_ADDITIONAL_CUSTODY_GROUP return min(max(count, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS) - name: get_voting_source sources: [] spec: | def get_voting_source(store: Store, block_root: Root) -> Checkpoint: """ Compute the voting source checkpoint in event that block with root ``block_root`` is the head block """ block = store.blocks[block_root] current_epoch = get_current_store_epoch(store) block_epoch = compute_epoch_at_slot(block.slot) if current_epoch > block_epoch: # The block is from a prior epoch, the voting source will be pulled-up return store.unrealized_justifications[block_root] else: # The block is not from a prior epoch, therefore the voting source is not pulled up head_state = store.block_states[block_root] return head_state.current_justified_checkpoint - name: get_weight sources: - file: beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go search: func (f *ForkChoice) Weight( spec: | def get_weight(store: Store, root: Root) -> Gwei: state = store.checkpoint_states[store.justified_checkpoint] unslashed_and_active_indices = [ i for i in get_active_validator_indices(state, get_current_epoch(state)) if not state.validators[i].slashed ] attestation_score = Gwei( sum( state.validators[i].effective_balance for i in unslashed_and_active_indices if ( i in store.latest_messages and i not in store.equivocating_indices and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root ) ) ) if store.proposer_boost_root == Root(): # Return only attestation score if ``proposer_boost_root`` is not set 
return attestation_score # Calculate proposer score if ``proposer_boost_root`` is set proposer_score = Gwei(0) # Boost is applied if ``root`` is an ancestor of ``proposer_boost_root`` if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root: proposer_score = get_proposer_score(store) return attestation_score + proposer_score - name: has_compounding_withdrawal_credential sources: - file: beacon-chain/state/state-native/readonly_validator.go search: func (v readOnlyValidator) HasCompoundingWithdrawalCredentials( spec: | def has_compounding_withdrawal_credential(validator: Validator) -> bool: """ Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential. """ return is_compounding_withdrawal_credential(validator.withdrawal_credentials) - name: has_eth1_withdrawal_credential sources: - file: beacon-chain/state/state-native/readonly_validator.go search: func (v readOnlyValidator) HasETH1WithdrawalCredentials( spec: | def has_eth1_withdrawal_credential(validator: Validator) -> bool: """ Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential. """ return validator.withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX - name: has_execution_withdrawal_credential sources: - file: beacon-chain/state/state-native/readonly_validator.go search: func (v readOnlyValidator) HasExecutionWithdrawalCredentials( spec: | def has_execution_withdrawal_credential(validator: Validator) -> bool: """ Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential. """ return ( has_eth1_withdrawal_credential(validator) # 0x01 or has_compounding_withdrawal_credential(validator) # 0x02 ) - name: has_flag sources: - file: beacon-chain/core/altair/attestation.go search: func HasValidatorFlag( spec: | def has_flag(flags: ParticipationFlags, flag_index: int) -> bool: """ Return whether ``flags`` has ``flag_index`` set. """ flag = ParticipationFlags(2**flag_index) return flags & flag == flag - name: increase_balance sources: - file: beacon-chain/core/helpers/rewards_penalties.go search: func IncreaseBalance( spec: | def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: """ Increase the validator balance at index ``index`` by ``delta``. 
""" state.balances[index] += delta - name: initialize_beacon_state_from_eth1 sources: - file: beacon-chain/core/transition/state.go search: func GenesisBeaconState( spec: | def initialize_beacon_state_from_eth1( eth1_block_hash: Hash32, eth1_timestamp: uint64, deposits: Sequence[Deposit] ) -> BeaconState: fork = Fork( previous_version=GENESIS_FORK_VERSION, current_version=GENESIS_FORK_VERSION, epoch=GENESIS_EPOCH, ) state = BeaconState( genesis_time=eth1_timestamp + GENESIS_DELAY, fork=fork, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy ) # Process deposits leaves = list(map(lambda deposit: deposit.data, deposits)) for index, deposit in enumerate(deposits): deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[: index + 1]) state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) process_deposit(state, deposit) # Process activations for index, validator in enumerate(state.validators): balance = state.balances[index] validator.effective_balance = min( balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE ) if validator.effective_balance == MAX_EFFECTIVE_BALANCE: validator.activation_eligibility_epoch = GENESIS_EPOCH validator.activation_epoch = GENESIS_EPOCH # Set genesis validators root for domain separation and chain versioning state.genesis_validators_root = hash_tree_root(state.validators) return state - name: initialize_light_client_store sources: - file: beacon-chain/light-client/store.go search: func NewLightClientStore( spec: | def initialize_light_client_store( trusted_block_root: Root, bootstrap: LightClientBootstrap ) -> LightClientStore: assert is_valid_light_client_header(bootstrap.header) assert hash_tree_root(bootstrap.header.beacon) == trusted_block_root assert is_valid_normalized_merkle_branch( leaf=hash_tree_root(bootstrap.current_sync_committee), branch=bootstrap.current_sync_committee_branch, gindex=current_sync_committee_gindex_at_slot(bootstrap.header.beacon.slot), root=bootstrap.header.beacon.state_root, ) return LightClientStore( finalized_header=bootstrap.header, current_sync_committee=bootstrap.current_sync_committee, next_sync_committee=SyncCommittee(), best_valid_update=None, optimistic_header=bootstrap.header, previous_max_active_participants=0, current_max_active_participants=0, ) - name: initialize_proposer_lookahead sources: - file: beacon-chain/core/helpers/beacon_committee.go search: func InitializeProposerLookahead( spec: | def initialize_proposer_lookahead( state: electra.BeaconState, ) -> Vector[ValidatorIndex, (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH]: """ Return the proposer indices for the full available lookahead starting from current epoch. Used to initialize the ``proposer_lookahead`` field in the beacon state at genesis and after forks. """ current_epoch = get_current_epoch(state) lookahead = [] for i in range(MIN_SEED_LOOKAHEAD + 1): lookahead.extend(get_beacon_proposer_indices(state, Epoch(current_epoch + i))) return lookahead - name: initiate_validator_exit#phase0 sources: - file: beacon-chain/core/validators/validator.go search: func InitiateValidatorExit( spec: | def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: """ Initiate the exit of the validator with index ``index``. 
""" # Return if validator already initiated exit validator = state.validators[index] if validator.exit_epoch != FAR_FUTURE_EPOCH: return # Compute exit queue epoch exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH] exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))]) exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch]) if exit_queue_churn >= get_validator_churn_limit(state): exit_queue_epoch += Epoch(1) # Set validator exit epoch and withdrawable epoch validator.exit_epoch = exit_queue_epoch validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) - name: initiate_validator_exit#electra sources: - file: beacon-chain/core/validators/validator.go search: func InitiateValidatorExit( spec: | def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: """ Initiate the exit of the validator with index ``index``. """ # Return if validator already initiated exit validator = state.validators[index] if validator.exit_epoch != FAR_FUTURE_EPOCH: return # Compute exit queue epoch [Modified in Electra:EIP7251] exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance) # Set validator exit epoch and withdrawable epoch validator.exit_epoch = exit_queue_epoch validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) - name: integer_squareroot sources: - file: math/math_helper.go search: func IntegerSquareRoot( spec: | def integer_squareroot(n: uint64) -> uint64: """ Return the largest integer ``x`` such that ``x**2 <= n``. """ if n == UINT64_MAX: return UINT64_MAX_SQRT x = n y = (x + 1) // 2 while y < x: x = y y = (x + n // x) // 2 return x - name: is_active_validator sources: - file: beacon-chain/core/helpers/validators.go search: func IsActiveValidator( spec: | def is_active_validator(validator: Validator, epoch: Epoch) -> bool: """ Check if ``validator`` is active. 
""" return validator.activation_epoch <= epoch < validator.exit_epoch - name: is_aggregator sources: - file: beacon-chain/core/helpers/attestation.go search: func IsAggregator( spec: | def is_aggregator( state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature ) -> bool: committee = get_beacon_committee(state, slot, index) modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE) return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0 - name: is_assigned_to_sync_committee sources: [] spec: | def is_assigned_to_sync_committee( state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex ) -> bool: sync_committee_period = compute_sync_committee_period(epoch) current_epoch = get_current_epoch(state) current_sync_committee_period = compute_sync_committee_period(current_epoch) next_sync_committee_period = current_sync_committee_period + 1 assert sync_committee_period in (current_sync_committee_period, next_sync_committee_period) pubkey = state.validators[validator_index].pubkey if sync_committee_period == current_sync_committee_period: return pubkey in state.current_sync_committee.pubkeys else: # sync_committee_period == next_sync_committee_period return pubkey in state.next_sync_committee.pubkeys - name: is_better_update sources: - file: beacon-chain/light-client/lightclient.go search: func IsBetterUpdate( spec: | def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdate) -> bool: # Compare supermajority (> 2/3) sync committee participation max_active_participants = len(new_update.sync_aggregate.sync_committee_bits) new_num_active_participants = sum(new_update.sync_aggregate.sync_committee_bits) old_num_active_participants = sum(old_update.sync_aggregate.sync_committee_bits) new_has_supermajority = new_num_active_participants * 3 >= max_active_participants * 2 old_has_supermajority = old_num_active_participants * 3 >= max_active_participants * 2 if new_has_supermajority != old_has_supermajority: return new_has_supermajority if not new_has_supermajority and new_num_active_participants != old_num_active_participants: return new_num_active_participants > old_num_active_participants # Compare presence of relevant sync committee new_has_relevant_sync_committee = is_sync_committee_update(new_update) and ( compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(new_update.signature_slot) ) old_has_relevant_sync_committee = is_sync_committee_update(old_update) and ( compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(old_update.signature_slot) ) if new_has_relevant_sync_committee != old_has_relevant_sync_committee: return new_has_relevant_sync_committee # Compare indication of any finality new_has_finality = is_finality_update(new_update) old_has_finality = is_finality_update(old_update) if new_has_finality != old_has_finality: return new_has_finality # Compare sync committee finality if new_has_finality: new_has_sync_committee_finality = compute_sync_committee_period_at_slot( new_update.finalized_header.beacon.slot ) == compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot) old_has_sync_committee_finality = compute_sync_committee_period_at_slot( old_update.finalized_header.beacon.slot ) == compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot) if new_has_sync_committee_finality != old_has_sync_committee_finality: return new_has_sync_committee_finality # 
Tiebreaker 1: Sync committee participation beyond supermajority if new_num_active_participants != old_num_active_participants: return new_num_active_participants > old_num_active_participants # Tiebreaker 2: Prefer older data (fewer changes to best) if new_update.attested_header.beacon.slot != old_update.attested_header.beacon.slot: return new_update.attested_header.beacon.slot < old_update.attested_header.beacon.slot # Tiebreaker 3: Prefer updates with earlier signature slots return new_update.signature_slot < old_update.signature_slot - name: is_candidate_block sources: [] spec: | def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool: return ( block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= period_start and block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 >= period_start ) - name: is_compounding_withdrawal_credential sources: [] spec: | def is_compounding_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool: return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX - name: is_data_available#deneb sources: - file: beacon-chain/blockchain/process_block.go search: func (s *Service) isDataAvailable( - file: beacon-chain/blockchain/process_block.go search: func (s *Service) areBlobsAvailable( spec: | def is_data_available( beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment] ) -> bool: # `retrieve_blobs_and_proofs` is implementation and context dependent # It returns all the blobs for the given block root, and raises an exception if not available # Note: the p2p network does not guarantee sidecar retrieval outside of # `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) return verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, proofs) - name: is_data_available#fulu sources: - file: beacon-chain/blockchain/process_block.go search: func (s *Service) isDataAvailable( - file: beacon-chain/blockchain/process_block.go search: func (s *Service) areDataColumnsAvailable( spec: | def is_data_available(beacon_block_root: Root) -> bool: # `retrieve_column_sidecars` is implementation and context dependent, replacing # `retrieve_blobs_and_proofs`. For the given block root, it returns all column # sidecars to sample, or raises an exception if they are not available. # The p2p network does not guarantee sidecar retrieval outside of # `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` epochs. column_sidecars = retrieve_column_sidecars(beacon_block_root) return all( verify_data_column_sidecar(column_sidecar) and verify_data_column_sidecar_kzg_proofs(column_sidecar) for column_sidecar in column_sidecars ) - name: is_eligible_for_activation sources: - file: beacon-chain/core/helpers/validators.go search: func IsEligibleForActivation( spec: | def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool: """ Check if ``validator`` is eligible for activation. """ return ( # Placement in queue is finalized validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch # Has not yet been activated and validator.activation_epoch == FAR_FUTURE_EPOCH ) - name: is_eligible_for_activation_queue#phase0 sources: - file: beacon-chain/core/helpers/validators.go search: func IsEligibleForActivationQueue( spec: | def is_eligible_for_activation_queue(validator: Validator) -> bool: """ Check if ``validator`` is eligible to be placed into the activation queue. 
""" return ( validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance == MAX_EFFECTIVE_BALANCE ) - name: is_eligible_for_activation_queue#electra sources: - file: beacon-chain/core/helpers/validators.go search: func IsEligibleForActivationQueue( spec: | def is_eligible_for_activation_queue(validator: Validator) -> bool: """ Check if ``validator`` is eligible to be placed into the activation queue. """ return ( validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH # [Modified in Electra:EIP7251] and validator.effective_balance >= MIN_ACTIVATION_BALANCE ) - name: is_execution_block sources: - file: beacon-chain/core/blocks/payload.go search: func IsExecutionBlock( spec: | def is_execution_block(block: BeaconBlock) -> bool: return block.body.execution_payload != ExecutionPayload() - name: is_execution_enabled sources: - file: beacon-chain/core/blocks/payload.go search: func IsExecutionEnabled( spec: | def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool: return is_merge_transition_block(state, body) or is_merge_transition_complete(state) - name: is_ffg_competitive sources: [] spec: | def is_ffg_competitive(store: Store, head_root: Root, parent_root: Root) -> bool: return ( store.unrealized_justifications[head_root] == store.unrealized_justifications[parent_root] ) - name: is_finality_update sources: [] spec: | def is_finality_update(update: LightClientUpdate) -> bool: return update.finality_branch != FinalityBranch() - name: is_finalization_ok sources: [] spec: | def is_finalization_ok(store: Store, slot: Slot) -> bool: epochs_since_finalization = compute_epoch_at_slot(slot) - store.finalized_checkpoint.epoch return epochs_since_finalization <= REORG_MAX_EPOCHS_SINCE_FINALIZATION - name: is_fully_withdrawable_validator#capella sources: - file: beacon-chain/core/helpers/validators.go search: func IsFullyWithdrawableValidator( spec: | def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool: """ Check if ``validator`` is fully withdrawable. """ return ( has_eth1_withdrawal_credential(validator) and validator.withdrawable_epoch <= epoch and balance > 0 ) - name: is_fully_withdrawable_validator#electra sources: - file: beacon-chain/core/helpers/validators.go search: func IsFullyWithdrawableValidator( - file: beacon-chain/core/helpers/validators.go search: if fork >= version.Electra { spec: | def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool: """ Check if ``validator`` is fully withdrawable. 
""" return ( # [Modified in Electra:EIP7251] has_execution_withdrawal_credential(validator) and validator.withdrawable_epoch <= epoch and balance > 0 ) - name: is_head_late sources: [] spec: | def is_head_late(store: Store, head_root: Root) -> bool: return not store.block_timeliness[head_root] - name: is_head_weak sources: [] spec: | def is_head_weak(store: Store, head_root: Root) -> bool: justified_state = store.checkpoint_states[store.justified_checkpoint] reorg_threshold = calculate_committee_fraction(justified_state, REORG_HEAD_WEIGHT_THRESHOLD) head_weight = get_weight(store, head_root) return head_weight < reorg_threshold - name: is_in_inactivity_leak sources: - file: beacon-chain/core/helpers/rewards_penalties.go search: func IsInInactivityLeak( spec: | def is_in_inactivity_leak(state: BeaconState) -> bool: return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY - name: is_merge_transition_block sources: [] spec: | def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool: return not is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload() - name: is_merge_transition_complete sources: - file: beacon-chain/core/blocks/payload.go search: func IsMergeTransitionComplete( spec: | def is_merge_transition_complete(state: BeaconState) -> bool: return state.latest_execution_payload_header != ExecutionPayloadHeader() - name: is_next_sync_committee_known sources: [] spec: | def is_next_sync_committee_known(store: LightClientStore) -> bool: return store.next_sync_committee != SyncCommittee() - name: is_optimistic sources: - file: beacon-chain/blockchain/chain_info.go search: func (s *Service) IsOptimistic( spec: | def is_optimistic(opt_store: OptimisticStore, block: BeaconBlock) -> bool: return hash_tree_root(block) in opt_store.optimistic_roots - name: is_optimistic_candidate_block sources: [] spec: | def is_optimistic_candidate_block( opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock ) -> bool: if is_execution_block(opt_store.blocks[block.parent_root]): return True if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot: return True return False - name: is_parent_strong sources: [] spec: | def is_parent_strong(store: Store, parent_root: Root) -> bool: justified_state = store.checkpoint_states[store.justified_checkpoint] parent_threshold = calculate_committee_fraction(justified_state, REORG_PARENT_WEIGHT_THRESHOLD) parent_weight = get_weight(store, parent_root) return parent_weight > parent_threshold - name: is_partially_withdrawable_validator#capella sources: - file: beacon-chain/core/helpers/validators.go search: func IsPartiallyWithdrawableValidator( - file: beacon-chain/core/helpers/validators.go search: func isPartiallyWithdrawableValidatorCapella( spec: | def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool: """ Check if ``validator`` is partially withdrawable. 
""" has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE has_excess_balance = balance > MAX_EFFECTIVE_BALANCE return ( has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance ) - name: is_partially_withdrawable_validator#electra sources: - file: beacon-chain/core/helpers/validators.go search: func IsPartiallyWithdrawableValidator( - file: beacon-chain/core/helpers/validators.go search: func isPartiallyWithdrawableValidatorElectra(v spec: | def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool: """ Check if ``validator`` is partially withdrawable. """ max_effective_balance = get_max_effective_balance(validator) # [Modified in Electra:EIP7251] has_max_effective_balance = validator.effective_balance == max_effective_balance # [Modified in Electra:EIP7251] has_excess_balance = balance > max_effective_balance return ( # [Modified in Electra:EIP7251] has_execution_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance ) - name: is_proposer sources: [] spec: | def is_proposer(state: BeaconState, validator_index: ValidatorIndex) -> bool: return get_beacon_proposer_index(state) == validator_index - name: is_proposing_on_time sources: [] spec: | def is_proposing_on_time(store: Store) -> bool: seconds_since_genesis = store.time - store.genesis_time time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS epoch = get_current_store_epoch(store) proposer_reorg_cutoff_ms = get_proposer_reorg_cutoff_ms(epoch) return time_into_slot_ms <= proposer_reorg_cutoff_ms - name: is_shuffling_stable sources: [] spec: | def is_shuffling_stable(slot: Slot) -> bool: return slot % SLOTS_PER_EPOCH != 0 - name: is_slashable_attestation_data sources: - file: beacon-chain/core/blocks/attester_slashing.go search: func IsSlashableAttestationData( spec: | def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool: """ Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules. """ return ( # Double vote (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) or # Surround vote (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch) ) - name: is_slashable_validator sources: - file: beacon-chain/core/helpers/validators.go search: func IsSlashableValidator( spec: | def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: """ Check if ``validator`` is slashable. 
""" return (not validator.slashed) and ( validator.activation_epoch <= epoch < validator.withdrawable_epoch ) - name: is_sync_committee_aggregator sources: - file: beacon-chain/core/altair/sync_committee.go search: func IsSyncCommitteeAggregator( - file: validator/client/validator.go search: func (v *validator) isSyncCommitteeAggregator( spec: | def is_sync_committee_aggregator(signature: BLSSignature) -> bool: modulo = max( 1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, ) return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0 - name: is_sync_committee_update sources: - file: beacon-chain/light-client/lightclient.go search: func HasRelevantSyncCommittee( spec: | def is_sync_committee_update(update: LightClientUpdate) -> bool: return update.next_sync_committee_branch != NextSyncCommitteeBranch() - name: is_valid_deposit_signature sources: - file: beacon-chain/core/electra/deposits.go search: func IsValidDepositSignature( - file: beacon-chain/core/blocks/deposit.go search: func IsValidDepositSignature( spec: | def is_valid_deposit_signature( pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature ) -> bool: deposit_message = DepositMessage( pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount, ) # Fork-agnostic domain since deposits are valid across forks domain = compute_domain(DOMAIN_DEPOSIT) signing_root = compute_signing_root(deposit_message, domain) return bls.Verify(pubkey, signing_root, signature) - name: is_valid_genesis_state sources: - file: beacon-chain/core/transition/state.go search: func IsValidGenesisState( spec: | def is_valid_genesis_state(state: BeaconState) -> bool: if state.genesis_time < MIN_GENESIS_TIME: return False if len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: return False return True - name: is_valid_indexed_attestation sources: - file: beacon-chain/core/blocks/attestation.go search: func VerifyIndexedAttestation( spec: | def is_valid_indexed_attestation( state: BeaconState, indexed_attestation: IndexedAttestation ) -> bool: """ Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature. 
""" # Verify indices are sorted and unique indices = indexed_attestation.attesting_indices if len(indices) == 0 or not indices == sorted(set(indices)): return False # Verify aggregate signature pubkeys = [state.validators[i].pubkey for i in indices] domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch) signing_root = compute_signing_root(indexed_attestation.data, domain) return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature) - name: is_valid_light_client_header#altair sources: [] spec: | def is_valid_light_client_header(_header: LightClientHeader) -> bool: return True - name: is_valid_light_client_header#capella sources: [] spec: | def is_valid_light_client_header(header: LightClientHeader) -> bool: epoch = compute_epoch_at_slot(header.beacon.slot) if epoch < CAPELLA_FORK_EPOCH: return ( header.execution == ExecutionPayloadHeader() and header.execution_branch == ExecutionBranch() ) return is_valid_merkle_branch( leaf=get_lc_execution_root(header), branch=header.execution_branch, depth=floorlog2(EXECUTION_PAYLOAD_GINDEX), index=get_subtree_index(EXECUTION_PAYLOAD_GINDEX), root=header.beacon.body_root, ) - name: is_valid_light_client_header#deneb sources: [] spec: | def is_valid_light_client_header(header: LightClientHeader) -> bool: epoch = compute_epoch_at_slot(header.beacon.slot) # [New in Deneb:EIP4844] if epoch < DENEB_FORK_EPOCH: if header.execution.blob_gas_used != uint64(0): return False if header.execution.excess_blob_gas != uint64(0): return False if epoch < CAPELLA_FORK_EPOCH: return ( header.execution == ExecutionPayloadHeader() and header.execution_branch == ExecutionBranch() ) return is_valid_merkle_branch( leaf=get_lc_execution_root(header), branch=header.execution_branch, depth=floorlog2(EXECUTION_PAYLOAD_GINDEX), index=get_subtree_index(EXECUTION_PAYLOAD_GINDEX), root=header.beacon.body_root, ) - name: is_valid_merkle_branch sources: [] spec: | def is_valid_merkle_branch( leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root ) -> bool: """ Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``. 
""" value = leaf for i in range(depth): if index // (2**i) % 2: value = hash(branch[i] + value) else: value = hash(value + branch[i]) return value == root - name: is_valid_normalized_merkle_branch sources: [] spec: | def is_valid_normalized_merkle_branch( leaf: Bytes32, branch: Sequence[Bytes32], gindex: GeneralizedIndex, root: Root ) -> bool: depth = floorlog2(gindex) index = get_subtree_index(gindex) num_extra = len(branch) - depth for i in range(num_extra): if branch[i] != Bytes32(): return False return is_valid_merkle_branch(leaf, branch[num_extra:], depth, index, root) - name: is_valid_switch_to_compounding_request sources: - file: beacon-chain/core/electra/consolidations.go search: func IsValidSwitchToCompoundingRequest( spec: | def is_valid_switch_to_compounding_request( state: BeaconState, consolidation_request: ConsolidationRequest ) -> bool: # Switch to compounding requires source and target be equal if consolidation_request.source_pubkey != consolidation_request.target_pubkey: return False # Verify pubkey exists source_pubkey = consolidation_request.source_pubkey validator_pubkeys = [v.pubkey for v in state.validators] if source_pubkey not in validator_pubkeys: return False source_validator = state.validators[ValidatorIndex(validator_pubkeys.index(source_pubkey))] # Verify request has been authorized if source_validator.withdrawal_credentials[12:] != consolidation_request.source_address: return False # Verify source withdrawal credentials if not has_eth1_withdrawal_credential(source_validator): return False # Verify the source is active current_epoch = get_current_epoch(state) if not is_active_validator(source_validator, current_epoch): return False # Verify exit for source has not been initiated if source_validator.exit_epoch != FAR_FUTURE_EPOCH: return False return True - name: is_valid_terminal_pow_block sources: - file: beacon-chain/blockchain/pow_block.go search: func validateTerminalBlockDifficulties( spec: | def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool: is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY return is_total_difficulty_reached and is_parent_total_difficulty_valid - name: is_within_weak_subjectivity_period#phase0 sources: - file: beacon-chain/core/helpers/weak_subjectivity.go search: func IsWithinWeakSubjectivityPeriod( spec: | def is_within_weak_subjectivity_period( store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint ) -> bool: # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint assert get_block_root(ws_state, ws_checkpoint.epoch) == ws_checkpoint.root assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch ws_period = compute_weak_subjectivity_period(ws_state) ws_state_epoch = compute_epoch_at_slot(ws_state.slot) current_epoch = compute_epoch_at_slot(get_current_slot(store)) return current_epoch <= ws_state_epoch + ws_period - name: is_within_weak_subjectivity_period#electra sources: [] spec: | def is_within_weak_subjectivity_period( store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint ) -> bool: # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint assert get_block_root(ws_state, ws_checkpoint.epoch) == ws_checkpoint.root assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch # [Modified in Electra] ws_period = compute_weak_subjectivity_period(ws_state) ws_state_epoch = 
compute_epoch_at_slot(ws_state.slot) current_epoch = compute_epoch_at_slot(get_current_slot(store)) return current_epoch <= ws_state_epoch + ws_period - name: kzg_commitment_to_versioned_hash sources: - file: consensus-types/primitives/kzg.go search: func ConvertKzgCommitmentToVersionedHash( spec: | def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> VersionedHash: return VERSIONED_HASH_VERSION_KZG + hash(kzg_commitment)[1:] - name: latest_verified_ancestor sources: [] spec: | def latest_verified_ancestor(opt_store: OptimisticStore, block: BeaconBlock) -> BeaconBlock: # It is assumed that the `block` parameter is never an INVALIDATED block. while True: if not is_optimistic(opt_store, block) or block.parent_root == Root(): return block block = opt_store.blocks[block.parent_root] - name: max_compressed_len sources: - file: beacon-chain/p2p/encoder/ssz.go search: func MaxCompressedLen( spec: | def max_compressed_len(n: uint64) -> uint64: # Worst-case compressed length for a given payload of size n when using snappy: # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 return uint64(32 + n + n / 6) - name: max_message_size sources: - file: beacon-chain/p2p/pubsub.go search: func MaxMessageSize( spec: | def max_message_size() -> uint64: # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small. return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024) - name: next_sync_committee_gindex_at_slot#altair sources: - file: beacon-chain/state/state-native/proofs.go search: func (b *BeaconState) NextSyncCommitteeGeneralizedIndex( spec: | def next_sync_committee_gindex_at_slot(_slot: Slot) -> GeneralizedIndex: return NEXT_SYNC_COMMITTEE_GINDEX - name: next_sync_committee_gindex_at_slot#electra sources: [] spec: | def next_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex: epoch = compute_epoch_at_slot(slot) # [Modified in Electra] if epoch >= ELECTRA_FORK_EPOCH: return NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA return NEXT_SYNC_COMMITTEE_GINDEX - name: normalize_merkle_branch sources: [] spec: | def normalize_merkle_branch( branch: Sequence[Bytes32], gindex: GeneralizedIndex ) -> Sequence[Bytes32]: depth = floorlog2(gindex) num_extra = depth - len(branch) return [Bytes32()] * num_extra + [*branch] - name: on_attestation sources: - file: beacon-chain/blockchain/process_attestation.go search: func (s *Service) OnAttestation( spec: | def on_attestation(store: Store, attestation: Attestation, is_from_block: bool = False) -> None: """ Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire. An ``attestation`` that is asserted as invalid may be valid at a later time, consider scheduling it for later processing in such case. 
""" validate_on_attestation(store, attestation, is_from_block) store_target_checkpoint_state(store, attestation.data.target) # Get state at the `target` to fully validate attestation target_state = store.checkpoint_states[attestation.data.target] indexed_attestation = get_indexed_attestation(target_state, attestation) assert is_valid_indexed_attestation(target_state, indexed_attestation) # Update latest messages for attesting indices update_latest_messages(store, indexed_attestation.attesting_indices, attestation) - name: on_attester_slashing sources: - file: beacon-chain/core/blocks/attester_slashing.go search: func ProcessAttesterSlashing( spec: | def on_attester_slashing(store: Store, attester_slashing: AttesterSlashing) -> None: """ Run ``on_attester_slashing`` immediately upon receiving a new ``AttesterSlashing`` from either within a block or directly on the wire. """ attestation_1 = attester_slashing.attestation_1 attestation_2 = attester_slashing.attestation_2 assert is_slashable_attestation_data(attestation_1.data, attestation_2.data) state = store.block_states[store.justified_checkpoint.root] assert is_valid_indexed_attestation(state, attestation_1) assert is_valid_indexed_attestation(state, attestation_2) indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices) for index in indices: store.equivocating_indices.add(index) - name: on_block#phase0 sources: - file: beacon-chain/blockchain/receive_block.go search: func (s *Service) ReceiveBlock( spec: | def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: block = signed_block.message # Parent block must be known assert block.parent_root in store.block_states # Make a copy of the state to avoid mutability issues pre_state = copy(store.block_states[block.parent_root]) # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
assert get_current_slot(store) >= block.slot # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot finalized_checkpoint_block = get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) assert store.finalized_checkpoint.root == finalized_checkpoint_block # Check the block is valid and compute the post-state state = pre_state.copy() block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Add new block to the store store.blocks[block_root] = block # Add new state for this block to the store store.block_states[block_root] = state # Add block timeliness to the store seconds_since_genesis = store.time - store.genesis_time time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS epoch = get_current_store_epoch(store) attestation_threshold_ms = get_attestation_due_ms(epoch) is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval store.block_timeliness[hash_tree_root(block)] = is_timely # Add proposer score boost if the block is timely and not conflicting with an existing block is_first_block = store.proposer_boost_root == Root() if is_timely and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) # Eagerly compute unrealized justification and finality compute_pulled_up_tip(store, block_root) - name: on_block#bellatrix sources: - file: beacon-chain/blockchain/receive_block.go search: func (s *Service) ReceiveBlock( spec: | def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: """ Run ``on_block`` upon receiving a new block. A block that is asserted as invalid due to unavailable PoW block may be valid at a later time, consider scheduling it for later processing in such case. """ block = signed_block.message # Parent block must be known assert block.parent_root in store.block_states # Make a copy of the state to avoid mutability issues pre_state = copy(store.block_states[block.parent_root]) # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
assert get_current_slot(store) >= block.slot # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot finalized_checkpoint_block = get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) assert store.finalized_checkpoint.root == finalized_checkpoint_block # Check the block is valid and compute the post-state state = pre_state.copy() block_root = hash_tree_root(block) state_transition(state, signed_block, True) # [New in Bellatrix] if is_merge_transition_block(pre_state, block.body): validate_merge_block(block) # Add new block to the store store.blocks[block_root] = block # Add new state for this block to the store store.block_states[block_root] = state # Add block timeliness to the store seconds_since_genesis = store.time - store.genesis_time time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS epoch = get_current_store_epoch(store) attestation_threshold_ms = get_attestation_due_ms(epoch) is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval store.block_timeliness[hash_tree_root(block)] = is_timely # Add proposer score boost if the block is timely and not conflicting with an existing block is_first_block = store.proposer_boost_root == Root() if is_timely and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) # Eagerly compute unrealized justification and finality. compute_pulled_up_tip(store, block_root) - name: on_block#capella sources: - file: beacon-chain/blockchain/receive_block.go search: func (s *Service) ReceiveBlock( spec: | def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: """ Run ``on_block`` upon receiving a new block. """ block = signed_block.message # Parent block must be known assert block.parent_root in store.block_states # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
assert get_current_slot(store) >= block.slot # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot finalized_checkpoint_block = get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) assert store.finalized_checkpoint.root == finalized_checkpoint_block # Check the block is valid and compute the post-state # Make a copy of the state to avoid mutability issues state = copy(store.block_states[block.parent_root]) block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Add new block to the store store.blocks[block_root] = block # Add new state for this block to the store store.block_states[block_root] = state # Add block timeliness to the store seconds_since_genesis = store.time - store.genesis_time time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS epoch = get_current_store_epoch(store) attestation_threshold_ms = get_attestation_due_ms(epoch) is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval store.block_timeliness[hash_tree_root(block)] = is_timely # Add proposer score boost if the block is timely and not conflicting with an existing block is_first_block = store.proposer_boost_root == Root() if is_timely and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) # Eagerly compute unrealized justification and finality. compute_pulled_up_tip(store, block_root) - name: on_block#deneb sources: - file: beacon-chain/blockchain/receive_block.go search: func (s *Service) ReceiveBlock( spec: | def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: """ Run ``on_block`` upon receiving a new block. """ block = signed_block.message # Parent block must be known assert block.parent_root in store.block_states # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
assert get_current_slot(store) >= block.slot # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot finalized_checkpoint_block = get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) assert store.finalized_checkpoint.root == finalized_checkpoint_block # [New in Deneb:EIP4844] # Check if blob data is available # If not, this payload MAY be queued and subsequently considered when blob data becomes available assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments) # Check the block is valid and compute the post-state # Make a copy of the state to avoid mutability issues state = copy(store.block_states[block.parent_root]) block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Add new block to the store store.blocks[block_root] = block # Add new state for this block to the store store.block_states[block_root] = state # Add block timeliness to the store seconds_since_genesis = store.time - store.genesis_time time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS epoch = get_current_store_epoch(store) attestation_threshold_ms = get_attestation_due_ms(epoch) is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval store.block_timeliness[hash_tree_root(block)] = is_timely # Add proposer score boost if the block is timely and not conflicting with an existing block is_first_block = store.proposer_boost_root == Root() if is_timely and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) # Eagerly compute unrealized justification and finality. compute_pulled_up_tip(store, block_root) - name: on_block#fulu sources: - file: beacon-chain/blockchain/receive_block.go search: func (s *Service) ReceiveBlock( spec: | def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: """ Run ``on_block`` upon receiving a new block. """ block = signed_block.message # Parent block must be known assert block.parent_root in store.block_states # Make a copy of the state to avoid mutability issues state = copy(store.block_states[block.parent_root]) # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past. 
assert get_current_slot(store) >= block.slot # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot finalized_checkpoint_block = get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) assert store.finalized_checkpoint.root == finalized_checkpoint_block # [Modified in Fulu:EIP7594] # Check if blob data is available # If not, this payload MAY be queued and subsequently considered when blob data becomes available assert is_data_available(hash_tree_root(block)) # Check the block is valid and compute the post-state block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Add new block to the store store.blocks[block_root] = block # Add new state for this block to the store store.block_states[block_root] = state # Add block timeliness to the store seconds_since_genesis = store.time - store.genesis_time time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS epoch = get_current_store_epoch(store) attestation_threshold_ms = get_attestation_due_ms(epoch) is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval store.block_timeliness[hash_tree_root(block)] = is_timely # Add proposer score boost if the block is timely and not conflicting with an existing block is_first_block = store.proposer_boost_root == Root() if is_timely and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) # Eagerly compute unrealized justification and finality. 
compute_pulled_up_tip(store, block_root) - name: on_tick sources: [] spec: | def on_tick(store: Store, time: uint64) -> None: # If the ``store.time`` falls behind, while loop catches up slot by slot # to ensure that every previous slot is processed with ``on_tick_per_slot`` tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT while get_current_slot(store) < tick_slot: previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT on_tick_per_slot(store, previous_time) on_tick_per_slot(store, time) - name: on_tick_per_slot sources: [] spec: | def on_tick_per_slot(store: Store, time: uint64) -> None: previous_slot = get_current_slot(store) # Update store time store.time = time current_slot = get_current_slot(store) # If this is a new slot, reset store.proposer_boost_root if current_slot > previous_slot: store.proposer_boost_root = Root() # If a new epoch, pull-up justification and finalization from previous epoch if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: update_checkpoints( store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint ) - name: prepare_execution_payload#bellatrix sources: [] spec: | def prepare_execution_payload( state: BeaconState, safe_block_hash: Hash32, finalized_block_hash: Hash32, suggested_fee_recipient: ExecutionAddress, execution_engine: ExecutionEngine, pow_chain: Optional[Dict[Hash32, PowBlock]] = None, ) -> Optional[PayloadId]: if not is_merge_transition_complete(state): assert pow_chain is not None is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32() is_activation_epoch_reached = ( get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH ) if is_terminal_block_hash_set and not is_activation_epoch_reached: # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed return None terminal_pow_block = get_terminal_pow_block(pow_chain) if terminal_pow_block is None: # Pre-merge, no prepare payload call is needed return None # Signify merge via producing on top of the terminal PoW block parent_hash = terminal_pow_block.block_hash else: # Post-merge, normal payload parent_hash = state.latest_execution_payload_header.block_hash # Set the forkchoice head and initiate the payload build process payload_attributes = PayloadAttributes( timestamp=compute_time_at_slot(state, state.slot), prev_randao=get_randao_mix(state, get_current_epoch(state)), suggested_fee_recipient=suggested_fee_recipient, ) return execution_engine.notify_forkchoice_updated( head_block_hash=parent_hash, safe_block_hash=safe_block_hash, finalized_block_hash=finalized_block_hash, payload_attributes=payload_attributes, ) - name: prepare_execution_payload#capella sources: [] spec: | def prepare_execution_payload( state: BeaconState, safe_block_hash: Hash32, finalized_block_hash: Hash32, suggested_fee_recipient: ExecutionAddress, execution_engine: ExecutionEngine, ) -> Optional[PayloadId]: # [Modified in Capella] # Removed `is_merge_transition_complete` check parent_hash = state.latest_execution_payload_header.block_hash # Set the forkchoice head and initiate the payload build process payload_attributes = PayloadAttributes( timestamp=compute_time_at_slot(state, state.slot), prev_randao=get_randao_mix(state, get_current_epoch(state)), suggested_fee_recipient=suggested_fee_recipient, # [New in Capella] withdrawals=get_expected_withdrawals(state), ) return execution_engine.notify_forkchoice_updated( head_block_hash=parent_hash, 
safe_block_hash=safe_block_hash, finalized_block_hash=finalized_block_hash, payload_attributes=payload_attributes, ) - name: prepare_execution_payload#deneb sources: [] spec: | def prepare_execution_payload( state: BeaconState, safe_block_hash: Hash32, finalized_block_hash: Hash32, suggested_fee_recipient: ExecutionAddress, execution_engine: ExecutionEngine, ) -> Optional[PayloadId]: # Verify consistency of the parent hash with respect to the previous execution payload header parent_hash = state.latest_execution_payload_header.block_hash # Set the forkchoice head and initiate the payload build process payload_attributes = PayloadAttributes( timestamp=compute_time_at_slot(state, state.slot), prev_randao=get_randao_mix(state, get_current_epoch(state)), suggested_fee_recipient=suggested_fee_recipient, withdrawals=get_expected_withdrawals(state), # [New in Deneb:EIP4788] parent_beacon_block_root=hash_tree_root(state.latest_block_header), ) return execution_engine.notify_forkchoice_updated( head_block_hash=parent_hash, safe_block_hash=safe_block_hash, finalized_block_hash=finalized_block_hash, payload_attributes=payload_attributes, ) - name: prepare_execution_payload#electra sources: [] spec: | def prepare_execution_payload( state: BeaconState, safe_block_hash: Hash32, finalized_block_hash: Hash32, suggested_fee_recipient: ExecutionAddress, execution_engine: ExecutionEngine, ) -> Optional[PayloadId]: # Verify consistency of the parent hash with respect to the previous execution payload header parent_hash = state.latest_execution_payload_header.block_hash # [Modified in EIP7251] # Set the forkchoice head and initiate the payload build process withdrawals, _ = get_expected_withdrawals(state) payload_attributes = PayloadAttributes( timestamp=compute_time_at_slot(state, state.slot), prev_randao=get_randao_mix(state, get_current_epoch(state)), suggested_fee_recipient=suggested_fee_recipient, withdrawals=withdrawals, parent_beacon_block_root=hash_tree_root(state.latest_block_header), ) return execution_engine.notify_forkchoice_updated( head_block_hash=parent_hash, safe_block_hash=safe_block_hash, finalized_block_hash=finalized_block_hash, payload_attributes=payload_attributes, ) - name: process_attestation#phase0 sources: - file: beacon-chain/core/blocks/attestation.go search: func ProcessAttestationNoVerifySignature( spec: | def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.target.epoch == compute_epoch_at_slot(data.slot) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH assert data.index < get_committee_count_per_slot(state, data.target.epoch) committee = get_beacon_committee(state, data.slot, data.index) assert len(attestation.aggregation_bits) == len(committee) pending_attestation = PendingAttestation( data=data, aggregation_bits=attestation.aggregation_bits, inclusion_delay=state.slot - data.slot, proposer_index=get_beacon_proposer_index(state), ) if data.target.epoch == get_current_epoch(state): assert data.source == state.current_justified_checkpoint state.current_epoch_attestations.append(pending_attestation) else: assert data.source == state.previous_justified_checkpoint state.previous_epoch_attestations.append(pending_attestation) # Verify signature assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) - name: process_attestation#altair sources: - file: 
beacon-chain/core/altair/attestation.go search: func ProcessAttestationNoVerifySignature( spec: | def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.target.epoch == compute_epoch_at_slot(data.slot) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH assert data.index < get_committee_count_per_slot(state, data.target.epoch) committee = get_beacon_committee(state, data.slot, data.index) assert len(attestation.aggregation_bits) == len(committee) # Participation flag indices participation_flag_indices = get_attestation_participation_flag_indices( state, data, state.slot - data.slot ) # Verify signature assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) # Update epoch participation flags if data.target.epoch == get_current_epoch(state): epoch_participation = state.current_epoch_participation else: epoch_participation = state.previous_epoch_participation proposer_reward_numerator = 0 for index in get_attesting_indices(state, attestation): for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): if flag_index in participation_flag_indices and not has_flag( epoch_participation[index], flag_index ): epoch_participation[index] = add_flag(epoch_participation[index], flag_index) proposer_reward_numerator += get_base_reward(state, index) * weight # Reward proposer proposer_reward_denominator = ( (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT ) proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) increase_balance(state, get_beacon_proposer_index(state), proposer_reward) - name: process_attestation#deneb sources: - file: beacon-chain/core/altair/attestation.go search: func ProcessAttestationNoVerifySignature( spec: | def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.target.epoch == compute_epoch_at_slot(data.slot) # [Modified in Deneb:EIP7045] assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot assert data.index < get_committee_count_per_slot(state, data.target.epoch) committee = get_beacon_committee(state, data.slot, data.index) assert len(attestation.aggregation_bits) == len(committee) # Participation flag indices participation_flag_indices = get_attestation_participation_flag_indices( state, data, state.slot - data.slot ) # Verify signature assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) # Update epoch participation flags if data.target.epoch == get_current_epoch(state): epoch_participation = state.current_epoch_participation else: epoch_participation = state.previous_epoch_participation proposer_reward_numerator = 0 for index in get_attesting_indices(state, attestation): for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): if flag_index in participation_flag_indices and not has_flag( epoch_participation[index], flag_index ): epoch_participation[index] = add_flag(epoch_participation[index], flag_index) proposer_reward_numerator += get_base_reward(state, index) * weight # Reward proposer proposer_reward_denominator = ( (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT ) proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) increase_balance(state, 
get_beacon_proposer_index(state), proposer_reward) - name: process_attestation#electra sources: - file: beacon-chain/core/altair/attestation.go search: func ProcessAttestationNoVerifySignature( spec: | def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.target.epoch == compute_epoch_at_slot(data.slot) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot # [Modified in Electra:EIP7549] assert data.index == 0 committee_indices = get_committee_indices(attestation.committee_bits) committee_offset = 0 for committee_index in committee_indices: assert committee_index < get_committee_count_per_slot(state, data.target.epoch) committee = get_beacon_committee(state, data.slot, committee_index) committee_attesters = set( attester_index for i, attester_index in enumerate(committee) if attestation.aggregation_bits[committee_offset + i] ) assert len(committee_attesters) > 0 committee_offset += len(committee) # Bitfield length matches total number of participants assert len(attestation.aggregation_bits) == committee_offset # Participation flag indices participation_flag_indices = get_attestation_participation_flag_indices( state, data, state.slot - data.slot ) # Verify signature assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) # Update epoch participation flags if data.target.epoch == get_current_epoch(state): epoch_participation = state.current_epoch_participation else: epoch_participation = state.previous_epoch_participation proposer_reward_numerator = 0 for index in get_attesting_indices(state, attestation): for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): if flag_index in participation_flag_indices and not has_flag( epoch_participation[index], flag_index ): epoch_participation[index] = add_flag(epoch_participation[index], flag_index) proposer_reward_numerator += get_base_reward(state, index) * weight # Reward proposer proposer_reward_denominator = ( (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT ) proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) increase_balance(state, get_beacon_proposer_index(state), proposer_reward) - name: process_attester_slashing sources: - file: beacon-chain/core/blocks/attester_slashing.go search: func ProcessAttesterSlashing( spec: | def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None: attestation_1 = attester_slashing.attestation_1 attestation_2 = attester_slashing.attestation_2 assert is_slashable_attestation_data(attestation_1.data, attestation_2.data) assert is_valid_indexed_attestation(state, attestation_1) assert is_valid_indexed_attestation(state, attestation_2) slashed_any = False indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices) for index in sorted(indices): if is_slashable_validator(state.validators[index], get_current_epoch(state)): slash_validator(state, index) slashed_any = True assert slashed_any - name: process_block#phase0 sources: - file: beacon-chain/core/transition/transition_no_verify_sig.go search: func ProcessBlockNoVerifyAnySig( spec: | def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) process_randao(state, block.body) process_eth1_data(state, block.body) process_operations(state, block.body) - name: process_block#altair sources: - file: 
beacon-chain/core/transition/transition_no_verify_sig.go search: func ProcessBlockNoVerifyAnySig( spec: | def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) process_randao(state, block.body) process_eth1_data(state, block.body) # [Modified in Altair] process_operations(state, block.body) # [New in Altair] process_sync_aggregate(state, block.body.sync_aggregate) - name: process_block#bellatrix sources: - file: beacon-chain/core/transition/transition_no_verify_sig.go search: func ProcessBlockNoVerifyAnySig( spec: | def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) if is_execution_enabled(state, block.body): # [New in Bellatrix] process_execution_payload(state, block.body, EXECUTION_ENGINE) process_randao(state, block.body) process_eth1_data(state, block.body) process_operations(state, block.body) process_sync_aggregate(state, block.body.sync_aggregate) - name: process_block#capella sources: - file: beacon-chain/core/transition/transition_no_verify_sig.go search: func ProcessBlockNoVerifyAnySig( spec: | def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) # [Modified in Capella] # Removed `is_execution_enabled` call # [New in Capella] process_withdrawals(state, block.body.execution_payload) # [Modified in Capella] process_execution_payload(state, block.body, EXECUTION_ENGINE) process_randao(state, block.body) process_eth1_data(state, block.body) # [Modified in Capella] process_operations(state, block.body) process_sync_aggregate(state, block.body.sync_aggregate) - name: process_block#electra sources: - file: beacon-chain/core/transition/transition_no_verify_sig.go search: func ProcessBlockNoVerifyAnySig( spec: | def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) # [Modified in Electra:EIP7251] process_withdrawals(state, block.body.execution_payload) # [Modified in Electra:EIP6110] process_execution_payload(state, block.body, EXECUTION_ENGINE) process_randao(state, block.body) process_eth1_data(state, block.body) # [Modified in Electra:EIP6110:EIP7002:EIP7549:EIP7251] process_operations(state, block.body) process_sync_aggregate(state, block.body.sync_aggregate) - name: process_block_header sources: - file: beacon-chain/core/blocks/header.go search: func ProcessBlockHeader( spec: | def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the slots match assert block.slot == state.slot # Verify that the block is newer than latest block header assert block.slot > state.latest_block_header.slot # Verify that proposer index is the correct index assert block.proposer_index == get_beacon_proposer_index(state) # Verify that the parent matches assert block.parent_root == hash_tree_root(state.latest_block_header) # Cache current block as the new latest block state.latest_block_header = BeaconBlockHeader( slot=block.slot, proposer_index=block.proposer_index, parent_root=block.parent_root, state_root=Bytes32(), # Overwritten in the next process_slot call body_root=hash_tree_root(block.body), ) # Verify proposer is not slashed proposer = state.validators[block.proposer_index] assert not proposer.slashed - name: process_bls_to_execution_change sources: - file: beacon-chain/core/blocks/withdrawals.go search: func ProcessBLSToExecutionChanges( spec: | def process_bls_to_execution_change( state: BeaconState, signed_address_change: SignedBLSToExecutionChange ) -> None: 
address_change = signed_address_change.message assert address_change.validator_index < len(state.validators) validator = state.validators[address_change.validator_index] assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:] # Fork-agnostic domain since address changes are valid across forks domain = compute_domain( DOMAIN_BLS_TO_EXECUTION_CHANGE, genesis_validators_root=state.genesis_validators_root ) signing_root = compute_signing_root(address_change, domain) assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) validator.withdrawal_credentials = ( ETH1_ADDRESS_WITHDRAWAL_PREFIX + b"\x00" * 11 + address_change.to_execution_address ) - name: process_consolidation_request sources: - file: beacon-chain/core/electra/consolidations.go search: func ProcessConsolidationRequests( spec: | def process_consolidation_request( state: BeaconState, consolidation_request: ConsolidationRequest ) -> None: if is_valid_switch_to_compounding_request(state, consolidation_request): validator_pubkeys = [v.pubkey for v in state.validators] request_source_pubkey = consolidation_request.source_pubkey source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey)) switch_to_compounding_validator(state, source_index) return # Verify that source != target, so a consolidation cannot be used as an exit if consolidation_request.source_pubkey == consolidation_request.target_pubkey: return # If the pending consolidations queue is full, consolidation requests are ignored if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT: return # If there is too little available consolidation churn limit, consolidation requests are ignored if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE: return validator_pubkeys = [v.pubkey for v in state.validators] # Verify pubkeys exists request_source_pubkey = consolidation_request.source_pubkey request_target_pubkey = consolidation_request.target_pubkey if request_source_pubkey not in validator_pubkeys: return if request_target_pubkey not in validator_pubkeys: return source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey)) target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey)) source_validator = state.validators[source_index] target_validator = state.validators[target_index] # Verify source withdrawal credentials has_correct_credential = has_execution_withdrawal_credential(source_validator) is_correct_source_address = ( source_validator.withdrawal_credentials[12:] == consolidation_request.source_address ) if not (has_correct_credential and is_correct_source_address): return # Verify that target has compounding withdrawal credentials if not has_compounding_withdrawal_credential(target_validator): return # Verify the source and the target are active current_epoch = get_current_epoch(state) if not is_active_validator(source_validator, current_epoch): return if not is_active_validator(target_validator, current_epoch): return # Verify exits for source and target have not been initiated if source_validator.exit_epoch != FAR_FUTURE_EPOCH: return if target_validator.exit_epoch != FAR_FUTURE_EPOCH: return # Verify the source has been active long enough if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD: return # Verify the source has no pending withdrawals in the queue if get_pending_balance_to_withdraw(state, source_index) > 0: return # Initiate 
source validator exit and append pending consolidation source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn( state, source_validator.effective_balance ) source_validator.withdrawable_epoch = Epoch( source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY ) state.pending_consolidations.append( PendingConsolidation(source_index=source_index, target_index=target_index) ) - name: process_deposit#phase0 sources: - file: beacon-chain/core/altair/deposit.go search: func ProcessDeposit( spec: | def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Verify the Merkle branch assert is_valid_merkle_branch( leaf=hash_tree_root(deposit.data), branch=deposit.proof, # Add 1 for the List length mix-in depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, index=state.eth1_deposit_index, root=state.eth1_data.deposit_root, ) # Deposits must be processed in order state.eth1_deposit_index += 1 apply_deposit( state=state, pubkey=deposit.data.pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, amount=deposit.data.amount, signature=deposit.data.signature, ) - name: process_deposit#electra sources: - file: beacon-chain/core/electra/deposits.go search: func ProcessDeposit( spec: | def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Verify the Merkle branch assert is_valid_merkle_branch( leaf=hash_tree_root(deposit.data), branch=deposit.proof, # Add 1 for the List length mix-in depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, index=state.eth1_deposit_index, root=state.eth1_data.deposit_root, ) # Deposits must be processed in order state.eth1_deposit_index += 1 # [Modified in Electra:EIP7251] apply_deposit( state=state, pubkey=deposit.data.pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, amount=deposit.data.amount, signature=deposit.data.signature, ) - name: process_deposit_request sources: - file: beacon-chain/core/electra/deposits.go search: func processDepositRequest( spec: | def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None: # Set deposit request start index if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX: state.deposit_requests_start_index = deposit_request.index # Create pending deposit state.pending_deposits.append( PendingDeposit( pubkey=deposit_request.pubkey, withdrawal_credentials=deposit_request.withdrawal_credentials, amount=deposit_request.amount, signature=deposit_request.signature, slot=state.slot, ) ) - name: process_effective_balance_updates#phase0 sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessEffectiveBalanceUpdates( spec: | def process_effective_balance_updates(state: BeaconState) -> None: # Update effective balances with hysteresis for index, validator in enumerate(state.validators): balance = state.balances[index] HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT) DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER if ( balance + DOWNWARD_THRESHOLD < validator.effective_balance or validator.effective_balance + UPWARD_THRESHOLD < balance ): validator.effective_balance = min( balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE ) - name: process_effective_balance_updates#electra sources: - file: beacon-chain/core/electra/effective_balance_updates.go search: func ProcessEffectiveBalanceUpdates( spec: | def process_effective_balance_updates(state: BeaconState) -> None: # Update effective 
balances with hysteresis for index, validator in enumerate(state.validators): balance = state.balances[index] HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT) DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER # [Modified in Electra:EIP7251] max_effective_balance = get_max_effective_balance(validator) if ( balance + DOWNWARD_THRESHOLD < validator.effective_balance or validator.effective_balance + UPWARD_THRESHOLD < balance ): validator.effective_balance = min( balance - balance % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance ) - name: process_epoch#phase0 sources: - file: beacon-chain/core/transition/transition.go search: func ProcessEpoch( - file: beacon-chain/core/transition/transition.go search: func ProcessEpochPrecompute( spec: | def process_epoch(state: BeaconState) -> None: process_justification_and_finalization(state) process_rewards_and_penalties(state) process_registry_updates(state) process_slashings(state) process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) process_randao_mixes_reset(state) process_historical_roots_update(state) process_participation_record_updates(state) - name: process_epoch#altair sources: - file: beacon-chain/core/transition/transition.go search: func ProcessEpoch( - file: beacon-chain/core/altair/transition.go search: func ProcessEpoch( spec: | def process_epoch(state: BeaconState) -> None: # [Modified in Altair] process_justification_and_finalization(state) # [New in Altair] process_inactivity_updates(state) # [Modified in Altair] process_rewards_and_penalties(state) process_registry_updates(state) # [Modified in Altair] process_slashings(state) process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) process_randao_mixes_reset(state) process_historical_roots_update(state) # [New in Altair] process_participation_flag_updates(state) # [New in Altair] process_sync_committee_updates(state) - name: process_epoch#electra sources: - file: beacon-chain/core/transition/transition.go search: func ProcessEpoch( - file: beacon-chain/core/electra/transition.go search: func ProcessEpoch( spec: | def process_epoch(state: BeaconState) -> None: process_justification_and_finalization(state) process_inactivity_updates(state) process_rewards_and_penalties(state) # [Modified in Electra:EIP7251] process_registry_updates(state) # [Modified in Electra:EIP7251] process_slashings(state) process_eth1_data_reset(state) # [New in Electra:EIP7251] process_pending_deposits(state) # [New in Electra:EIP7251] process_pending_consolidations(state) # [Modified in Electra:EIP7251] process_effective_balance_updates(state) process_slashings_reset(state) process_randao_mixes_reset(state) process_historical_summaries_update(state) process_participation_flag_updates(state) process_sync_committee_updates(state) - name: process_epoch#fulu sources: - file: beacon-chain/core/transition/transition.go search: func ProcessEpoch( - file: beacon-chain/core/fulu/transition.go search: func ProcessEpoch( spec: | def process_epoch(state: BeaconState) -> None: process_justification_and_finalization(state) process_inactivity_updates(state) process_rewards_and_penalties(state) process_registry_updates(state) process_slashings(state) process_eth1_data_reset(state) process_pending_deposits(state) process_pending_consolidations(state) process_effective_balance_updates(state) 
process_slashings_reset(state) process_randao_mixes_reset(state) process_historical_summaries_update(state) process_participation_flag_updates(state) process_sync_committee_updates(state) # [New in Fulu:EIP7917] process_proposer_lookahead(state) - name: process_eth1_data sources: - file: beacon-chain/core/blocks/eth1_data.go search: func ProcessEth1DataInBlock( spec: | def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: state.eth1_data_votes.append(body.eth1_data) if ( state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH ): state.eth1_data = body.eth1_data - name: process_eth1_data_reset sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessEth1DataReset( spec: | def process_eth1_data_reset(state: BeaconState) -> None: next_epoch = Epoch(get_current_epoch(state) + 1) # Reset eth1 data votes if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: state.eth1_data_votes = [] - name: process_execution_payload#bellatrix sources: - file: beacon-chain/core/blocks/payload.go search: func ProcessPayload( spec: | def process_execution_payload( state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine ) -> None: payload = body.execution_payload # Verify consistency of the parent hash with respect to the previous execution payload header if is_merge_transition_complete(state): assert payload.parent_hash == state.latest_execution_payload_header.block_hash # Verify prev_randao assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) # Verify timestamp assert payload.timestamp == compute_time_at_slot(state, state.slot) # Verify the execution payload is valid assert execution_engine.verify_and_notify_new_payload( NewPayloadRequest(execution_payload=payload) ) # Cache execution payload header state.latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=payload.parent_hash, fee_recipient=payload.fee_recipient, state_root=payload.state_root, receipts_root=payload.receipts_root, logs_bloom=payload.logs_bloom, prev_randao=payload.prev_randao, block_number=payload.block_number, gas_limit=payload.gas_limit, gas_used=payload.gas_used, timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), ) - name: process_execution_payload#capella sources: - file: beacon-chain/core/blocks/payload.go search: func ProcessPayload( spec: | def process_execution_payload( state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine ) -> None: payload = body.execution_payload # [Modified in Capella] # Removed `is_merge_transition_complete` check # Verify consistency of the parent hash with respect to the previous execution payload header assert payload.parent_hash == state.latest_execution_payload_header.block_hash # Verify prev_randao assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) # Verify timestamp assert payload.timestamp == compute_time_at_slot(state, state.slot) # Verify the execution payload is valid assert execution_engine.verify_and_notify_new_payload( NewPayloadRequest(execution_payload=payload) ) # Cache execution payload header state.latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=payload.parent_hash, fee_recipient=payload.fee_recipient, state_root=payload.state_root, receipts_root=payload.receipts_root, logs_bloom=payload.logs_bloom, prev_randao=payload.prev_randao, 
block_number=payload.block_number, gas_limit=payload.gas_limit, gas_used=payload.gas_used, timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), # [New in Capella] withdrawals_root=hash_tree_root(payload.withdrawals), ) - name: process_execution_payload#deneb sources: - file: beacon-chain/core/blocks/payload.go search: func ProcessPayload( spec: | def process_execution_payload( state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine ) -> None: payload = body.execution_payload # Verify consistency of the parent hash with respect to the previous execution payload header assert payload.parent_hash == state.latest_execution_payload_header.block_hash # Verify prev_randao assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) # Verify timestamp assert payload.timestamp == compute_time_at_slot(state, state.slot) # [New in Deneb:EIP4844] Verify commitments are under limit assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK # Verify the execution payload is valid # [Modified in Deneb:EIP4844] Pass `versioned_hashes` to Execution Engine # [Modified in Deneb:EIP4788] Pass `parent_beacon_block_root` to Execution Engine versioned_hashes = [ kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments ] assert execution_engine.verify_and_notify_new_payload( NewPayloadRequest( execution_payload=payload, versioned_hashes=versioned_hashes, parent_beacon_block_root=state.latest_block_header.parent_root, ) ) # Cache execution payload header state.latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=payload.parent_hash, fee_recipient=payload.fee_recipient, state_root=payload.state_root, receipts_root=payload.receipts_root, logs_bloom=payload.logs_bloom, prev_randao=payload.prev_randao, block_number=payload.block_number, gas_limit=payload.gas_limit, gas_used=payload.gas_used, timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), # [New in Deneb:EIP4844] blob_gas_used=payload.blob_gas_used, # [New in Deneb:EIP4844] excess_blob_gas=payload.excess_blob_gas, ) - name: process_execution_payload#electra sources: - file: beacon-chain/core/blocks/payload.go search: func ProcessPayload( spec: | def process_execution_payload( state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine ) -> None: payload = body.execution_payload # Verify consistency of the parent hash with respect to the previous execution payload header assert payload.parent_hash == state.latest_execution_payload_header.block_hash # Verify prev_randao assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) # Verify timestamp assert payload.timestamp == compute_time_at_slot(state, state.slot) # [Modified in Electra:EIP7691] Verify commitments are under limit assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA # Verify the execution payload is valid versioned_hashes = [ kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments ] assert execution_engine.verify_and_notify_new_payload( NewPayloadRequest( execution_payload=payload, versioned_hashes=versioned_hashes, parent_beacon_block_root=state.latest_block_header.parent_root, # [New in Electra] 
execution_requests=body.execution_requests, ) ) # Cache execution payload header state.latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=payload.parent_hash, fee_recipient=payload.fee_recipient, state_root=payload.state_root, receipts_root=payload.receipts_root, logs_bloom=payload.logs_bloom, prev_randao=payload.prev_randao, block_number=payload.block_number, gas_limit=payload.gas_limit, gas_used=payload.gas_used, timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), blob_gas_used=payload.blob_gas_used, excess_blob_gas=payload.excess_blob_gas, ) - name: process_execution_payload#fulu sources: - file: beacon-chain/core/blocks/payload.go search: func ProcessPayload( spec: | def process_execution_payload( state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine ) -> None: payload = body.execution_payload # Verify consistency of the parent hash with respect to the previous execution payload header assert payload.parent_hash == state.latest_execution_payload_header.block_hash # Verify prev_randao assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) # Verify timestamp assert payload.timestamp == compute_time_at_slot(state, state.slot) # [Modified in Fulu:EIP7892] # Verify commitments are under limit assert ( len(body.blob_kzg_commitments) <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block ) # Verify the execution payload is valid versioned_hashes = [ kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments ] assert execution_engine.verify_and_notify_new_payload( NewPayloadRequest( execution_payload=payload, versioned_hashes=versioned_hashes, parent_beacon_block_root=state.latest_block_header.parent_root, execution_requests=body.execution_requests, ) ) # Cache execution payload header state.latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=payload.parent_hash, fee_recipient=payload.fee_recipient, state_root=payload.state_root, receipts_root=payload.receipts_root, logs_bloom=payload.logs_bloom, prev_randao=payload.prev_randao, block_number=payload.block_number, gas_limit=payload.gas_limit, gas_used=payload.gas_used, timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), blob_gas_used=payload.blob_gas_used, excess_blob_gas=payload.excess_blob_gas, ) - name: process_historical_roots_update sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessHistoricalDataUpdate( spec: | def process_historical_roots_update(state: BeaconState) -> None: # Set historical root accumulator next_epoch = Epoch(get_current_epoch(state) + 1) if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: historical_batch = HistoricalBatch( block_roots=state.block_roots, state_roots=state.state_roots ) state.historical_roots.append(hash_tree_root(historical_batch)) - name: process_historical_summaries_update sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessHistoricalDataUpdate( spec: | def process_historical_summaries_update(state: BeaconState) -> None: # Set historical block root accumulator. 
next_epoch = Epoch(get_current_epoch(state) + 1) if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: historical_summary = HistoricalSummary( block_summary_root=hash_tree_root(state.block_roots), state_summary_root=hash_tree_root(state.state_roots), ) state.historical_summaries.append(historical_summary) - name: process_inactivity_updates sources: - file: beacon-chain/core/altair/epoch_precompute.go search: func ProcessInactivityScores( spec: | def process_inactivity_updates(state: BeaconState) -> None: # Skip the genesis epoch as score updates are based on the previous epoch participation if get_current_epoch(state) == GENESIS_EPOCH: return for index in get_eligible_validator_indices(state): # Increase the inactivity score of inactive validators if index in get_unslashed_participating_indices( state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state) ): state.inactivity_scores[index] -= min(1, state.inactivity_scores[index]) else: state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS # Decrease the inactivity score of all eligible validators during a leak-free epoch if not is_in_inactivity_leak(state): state.inactivity_scores[index] -= min( INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index] ) - name: process_justification_and_finalization#phase0 sources: - file: beacon-chain/core/epoch/precompute/justification_finalization.go search: func ProcessJustificationAndFinalizationPreCompute( spec: | def process_justification_and_finalization(state: BeaconState) -> None: # Initial FFG checkpoint values have a `0x00` stub for `root`. # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub. if get_current_epoch(state) <= GENESIS_EPOCH + 1: return previous_attestations = get_matching_target_attestations(state, get_previous_epoch(state)) current_attestations = get_matching_target_attestations(state, get_current_epoch(state)) total_active_balance = get_total_active_balance(state) previous_target_balance = get_attesting_balance(state, previous_attestations) current_target_balance = get_attesting_balance(state, current_attestations) weigh_justification_and_finalization( state, total_active_balance, previous_target_balance, current_target_balance ) - name: process_justification_and_finalization#altair sources: - file: beacon-chain/core/epoch/precompute/justification_finalization.go search: func ProcessJustificationAndFinalizationPreCompute( spec: | def process_justification_and_finalization(state: BeaconState) -> None: # Initial FFG checkpoint values have a `0x00` stub for `root`. # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub. 
if get_current_epoch(state) <= GENESIS_EPOCH + 1: return previous_indices = get_unslashed_participating_indices( state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state) ) current_indices = get_unslashed_participating_indices( state, TIMELY_TARGET_FLAG_INDEX, get_current_epoch(state) ) total_active_balance = get_total_active_balance(state) previous_target_balance = get_total_balance(state, previous_indices) current_target_balance = get_total_balance(state, current_indices) weigh_justification_and_finalization( state, total_active_balance, previous_target_balance, current_target_balance ) - name: process_light_client_finality_update sources: - file: beacon-chain/light-client/lightclient.go search: func NewLightClientFinalityUpdateFromBeaconState( spec: | def process_light_client_finality_update( store: LightClientStore, finality_update: LightClientFinalityUpdate, current_slot: Slot, genesis_validators_root: Root, ) -> None: update = LightClientUpdate( attested_header=finality_update.attested_header, next_sync_committee=SyncCommittee(), next_sync_committee_branch=NextSyncCommitteeBranch(), finalized_header=finality_update.finalized_header, finality_branch=finality_update.finality_branch, sync_aggregate=finality_update.sync_aggregate, signature_slot=finality_update.signature_slot, ) process_light_client_update(store, update, current_slot, genesis_validators_root) - name: process_light_client_optimistic_update sources: - file: beacon-chain/light-client/lightclient.go search: func NewLightClientOptimisticUpdateFromBeaconState( spec: | def process_light_client_optimistic_update( store: LightClientStore, optimistic_update: LightClientOptimisticUpdate, current_slot: Slot, genesis_validators_root: Root, ) -> None: update = LightClientUpdate( attested_header=optimistic_update.attested_header, next_sync_committee=SyncCommittee(), next_sync_committee_branch=NextSyncCommitteeBranch(), finalized_header=LightClientHeader(), finality_branch=FinalityBranch(), sync_aggregate=optimistic_update.sync_aggregate, signature_slot=optimistic_update.signature_slot, ) process_light_client_update(store, update, current_slot, genesis_validators_root) - name: process_light_client_store_force_update sources: [] spec: | def process_light_client_store_force_update(store: LightClientStore, current_slot: Slot) -> None: if ( current_slot > store.finalized_header.beacon.slot + UPDATE_TIMEOUT and store.best_valid_update is not None ): # Forced best update when the update timeout has elapsed. # Because the apply logic waits for `finalized_header.beacon.slot` to indicate sync committee finality, # the `attested_header` may be treated as `finalized_header` in extended periods of non-finality # to guarantee progression into later sync committee periods according to `is_better_update`. 
if ( store.best_valid_update.finalized_header.beacon.slot <= store.finalized_header.beacon.slot ): store.best_valid_update.finalized_header = store.best_valid_update.attested_header apply_light_client_update(store, store.best_valid_update) store.best_valid_update = None - name: process_light_client_update sources: [] spec: | def process_light_client_update( store: LightClientStore, update: LightClientUpdate, current_slot: Slot, genesis_validators_root: Root, ) -> None: validate_light_client_update(store, update, current_slot, genesis_validators_root) sync_committee_bits = update.sync_aggregate.sync_committee_bits # Update the best update in case we have to force-update to it if the timeout elapses if store.best_valid_update is None or is_better_update(update, store.best_valid_update): store.best_valid_update = update # Track the maximum number of active participants in the committee signatures store.current_max_active_participants = max( store.current_max_active_participants, sum(sync_committee_bits), ) # Update the optimistic header if ( sum(sync_committee_bits) > get_safety_threshold(store) and update.attested_header.beacon.slot > store.optimistic_header.beacon.slot ): store.optimistic_header = update.attested_header # Update finalized header update_has_finalized_next_sync_committee = ( not is_next_sync_committee_known(store) and is_sync_committee_update(update) and is_finality_update(update) and ( compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot) == compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) ) ) if sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2 and ( update.finalized_header.beacon.slot > store.finalized_header.beacon.slot or update_has_finalized_next_sync_committee ): # Normal update through 2/3 threshold apply_light_client_update(store, update) store.best_valid_update = None - name: process_operations#phase0 sources: - file: beacon-chain/core/transition/transition_no_verify_sig.go search: func ProcessOperationsNoVerifyAttsSigs( spec: | def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min( MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index ) def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: fn(state, operation) for_ops(body.proposer_slashings, process_proposer_slashing) for_ops(body.attester_slashings, process_attester_slashing) for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) - name: process_operations#capella sources: - file: beacon-chain/core/transition/transition_no_verify_sig.go search: func ProcessOperationsNoVerifyAttsSigs( spec: | def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min( MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index ) def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: fn(state, operation) for_ops(body.proposer_slashings, process_proposer_slashing) for_ops(body.attester_slashings, process_attester_slashing) for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) # [New in 
Capella] for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) - name: process_operations#electra sources: - file: beacon-chain/core/electra/transition_no_verify_sig.go search: func ProcessOperations( spec: | def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # [Modified in Electra:EIP6110] # Disable former deposit mechanism once all prior deposits are processed eth1_deposit_index_limit = min( state.eth1_data.deposit_count, state.deposit_requests_start_index ) if state.eth1_deposit_index < eth1_deposit_index_limit: assert len(body.deposits) == min( MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index ) else: assert len(body.deposits) == 0 def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: fn(state, operation) for_ops(body.proposer_slashings, process_proposer_slashing) for_ops(body.attester_slashings, process_attester_slashing) # [Modified in Electra:EIP7549] for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251] for_ops(body.voluntary_exits, process_voluntary_exit) for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) # [New in Electra:EIP6110] for_ops(body.execution_requests.deposits, process_deposit_request) # [New in Electra:EIP7002:EIP7251] for_ops(body.execution_requests.withdrawals, process_withdrawal_request) # [New in Electra:EIP7251] for_ops(body.execution_requests.consolidations, process_consolidation_request) - name: process_participation_flag_updates sources: - file: beacon-chain/core/altair/epoch_spec.go search: func ProcessParticipationFlagUpdates( spec: | def process_participation_flag_updates(state: BeaconState) -> None: state.previous_epoch_participation = state.current_epoch_participation state.current_epoch_participation = [ ParticipationFlags(0b0000_0000) for _ in range(len(state.validators)) ] - name: process_participation_record_updates sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessParticipationRecordUpdates( spec: | def process_participation_record_updates(state: BeaconState) -> None: # Rotate current/previous epoch attestations state.previous_epoch_attestations = state.current_epoch_attestations state.current_epoch_attestations = [] - name: process_pending_consolidations sources: - file: beacon-chain/core/electra/consolidations.go search: func ProcessPendingConsolidations( spec: | def process_pending_consolidations(state: BeaconState) -> None: next_epoch = Epoch(get_current_epoch(state) + 1) next_pending_consolidation = 0 for pending_consolidation in state.pending_consolidations: source_validator = state.validators[pending_consolidation.source_index] if source_validator.slashed: next_pending_consolidation += 1 continue if source_validator.withdrawable_epoch > next_epoch: break # Calculate the consolidated balance source_effective_balance = min( state.balances[pending_consolidation.source_index], source_validator.effective_balance ) # Move active balance to target. Excess balance is withdrawable. 
decrease_balance(state, pending_consolidation.source_index, source_effective_balance) increase_balance(state, pending_consolidation.target_index, source_effective_balance) next_pending_consolidation += 1 state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:] - name: process_pending_deposits sources: - file: beacon-chain/core/electra/deposits.go search: func ProcessPendingDeposits( spec: | def process_pending_deposits(state: BeaconState) -> None: next_epoch = Epoch(get_current_epoch(state) + 1) available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit( state ) processed_amount = 0 next_deposit_index = 0 deposits_to_postpone = [] is_churn_limit_reached = False finalized_slot = compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) for deposit in state.pending_deposits: # Do not process deposit requests if Eth1 bridge deposits are not yet applied. if ( # Is deposit request deposit.slot > GENESIS_SLOT and # There are pending Eth1 bridge deposits state.eth1_deposit_index < state.deposit_requests_start_index ): break # Check if deposit has been finalized, otherwise, stop processing. if deposit.slot > finalized_slot: break # Check if number of processed deposits has not reached the limit, otherwise, stop processing. if next_deposit_index >= MAX_PENDING_DEPOSITS_PER_EPOCH: break # Read validator state is_validator_exited = False is_validator_withdrawn = False validator_pubkeys = [v.pubkey for v in state.validators] if deposit.pubkey in validator_pubkeys: validator = state.validators[ValidatorIndex(validator_pubkeys.index(deposit.pubkey))] is_validator_exited = validator.exit_epoch < FAR_FUTURE_EPOCH is_validator_withdrawn = validator.withdrawable_epoch < next_epoch if is_validator_withdrawn: # Deposited balance will never become active. Increase balance but do not consume churn apply_pending_deposit(state, deposit) elif is_validator_exited: # Validator is exiting, postpone the deposit until after withdrawable epoch deposits_to_postpone.append(deposit) else: # Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch. is_churn_limit_reached = processed_amount + deposit.amount > available_for_processing if is_churn_limit_reached: break # Consume churn and apply deposit. processed_amount += deposit.amount apply_pending_deposit(state, deposit) # Regardless of how the deposit was handled, we move on in the queue. next_deposit_index += 1 state.pending_deposits = state.pending_deposits[next_deposit_index:] + deposits_to_postpone # Accumulate churn only if the churn limit has been hit. 
if is_churn_limit_reached: state.deposit_balance_to_consume = available_for_processing - processed_amount else: state.deposit_balance_to_consume = Gwei(0) - name: process_proposer_lookahead sources: - file: beacon-chain/core/fulu/transition.go search: func ProcessProposerLookahead( spec: | def process_proposer_lookahead(state: BeaconState) -> None: last_epoch_start = len(state.proposer_lookahead) - SLOTS_PER_EPOCH # Shift out proposers in the first epoch state.proposer_lookahead[:last_epoch_start] = state.proposer_lookahead[SLOTS_PER_EPOCH:] # Fill in the last epoch with new proposer indices last_epoch_proposers = get_beacon_proposer_indices( state, Epoch(get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1) ) state.proposer_lookahead[last_epoch_start:] = last_epoch_proposers - name: process_proposer_slashing sources: - file: beacon-chain/core/blocks/proposer_slashing.go search: func ProcessProposerSlashing( spec: | def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None: header_1 = proposer_slashing.signed_header_1.message header_2 = proposer_slashing.signed_header_2.message # Verify header slots match assert header_1.slot == header_2.slot # Verify header proposer indices match assert header_1.proposer_index == header_2.proposer_index # Verify the headers are different assert header_1 != header_2 # Verify the proposer is slashable proposer = state.validators[header_1.proposer_index] assert is_slashable_validator(proposer, get_current_epoch(state)) # Verify signatures for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2): domain = get_domain( state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot) ) signing_root = compute_signing_root(signed_header.message, domain) assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature) slash_validator(state, header_1.proposer_index) - name: process_randao sources: - file: beacon-chain/core/blocks/randao.go search: func ProcessRandao( spec: | def process_randao(state: BeaconState, body: BeaconBlockBody) -> None: epoch = get_current_epoch(state) # Verify RANDAO reveal proposer = state.validators[get_beacon_proposer_index(state)] signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO)) assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal) # Mix in RANDAO reveal mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal)) state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix - name: process_randao_mixes_reset sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessRandaoMixesReset( spec: | def process_randao_mixes_reset(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = Epoch(current_epoch + 1) # Set randao mix state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix( state, current_epoch ) - name: process_registry_updates#phase0 sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessRegistryUpdates( spec: | def process_registry_updates(state: BeaconState) -> None: # Process activation eligibility and ejections for index, validator in enumerate(state.validators): if is_eligible_for_activation_queue(validator): validator.activation_eligibility_epoch = get_current_epoch(state) + 1 if ( is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE ): initiate_validator_exit(state, ValidatorIndex(index)) # Queue validators eligible for activation and 
not yet dequeued for activation activation_queue = sorted( [ index for index, validator in enumerate(state.validators) if is_eligible_for_activation(state, validator) ], # Order by the sequence of activation_eligibility_epoch setting and then index key=lambda index: (state.validators[index].activation_eligibility_epoch, index), ) # Dequeued validators for activation up to churn limit for index in activation_queue[: get_validator_churn_limit(state)]: validator = state.validators[index] validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) - name: process_registry_updates#deneb sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessRegistryUpdates( spec: | def process_registry_updates(state: BeaconState) -> None: # Process activation eligibility and ejections for index, validator in enumerate(state.validators): if is_eligible_for_activation_queue(validator): validator.activation_eligibility_epoch = get_current_epoch(state) + 1 if ( is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE ): initiate_validator_exit(state, ValidatorIndex(index)) # Queue validators eligible for activation and not yet dequeued for activation activation_queue = sorted( [ index for index, validator in enumerate(state.validators) if is_eligible_for_activation(state, validator) ], # Order by the sequence of activation_eligibility_epoch setting and then index key=lambda index: (state.validators[index].activation_eligibility_epoch, index), ) # Dequeued validators for activation up to activation churn limit # [Modified in Deneb:EIP7514] for index in activation_queue[: get_validator_activation_churn_limit(state)]: validator = state.validators[index] validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) - name: process_registry_updates#electra sources: - file: beacon-chain/core/electra/registry_updates.go search: func ProcessRegistryUpdates( spec: | def process_registry_updates(state: BeaconState) -> None: current_epoch = get_current_epoch(state) activation_epoch = compute_activation_exit_epoch(current_epoch) # Process activation eligibility, ejections, and activations for index, validator in enumerate(state.validators): # [Modified in Electra:EIP7251] if is_eligible_for_activation_queue(validator): validator.activation_eligibility_epoch = current_epoch + 1 elif ( is_active_validator(validator, current_epoch) and validator.effective_balance <= EJECTION_BALANCE ): # [Modified in Electra:EIP7251] initiate_validator_exit(state, ValidatorIndex(index)) elif is_eligible_for_activation(state, validator): validator.activation_epoch = activation_epoch - name: process_rewards_and_penalties#phase0 sources: - file: beacon-chain/core/epoch/precompute/reward_penalty.go search: func ProcessRewardsAndPenaltiesPrecompute( spec: | def process_rewards_and_penalties(state: BeaconState) -> None: # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch if get_current_epoch(state) == GENESIS_EPOCH: return rewards, penalties = get_attestation_deltas(state) for index in range(len(state.validators)): increase_balance(state, ValidatorIndex(index), rewards[index]) decrease_balance(state, ValidatorIndex(index), penalties[index]) - name: process_rewards_and_penalties#altair sources: - file: beacon-chain/core/altair/epoch_precompute.go search: func ProcessRewardsAndPenaltiesPrecompute( spec: | def process_rewards_and_penalties(state: BeaconState) -> None: # No 
rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch if get_current_epoch(state) == GENESIS_EPOCH: return flag_deltas = [ get_flag_index_deltas(state, flag_index) for flag_index in range(len(PARTICIPATION_FLAG_WEIGHTS)) ] deltas = flag_deltas + [get_inactivity_penalty_deltas(state)] for rewards, penalties in deltas: for index in range(len(state.validators)): increase_balance(state, ValidatorIndex(index), rewards[index]) decrease_balance(state, ValidatorIndex(index), penalties[index]) - name: process_slashings#phase0 sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessSlashings( spec: | def process_slashings(state: BeaconState) -> None: epoch = get_current_epoch(state) total_balance = get_total_active_balance(state) adjusted_total_slashing_balance = min( sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance ) for index, validator in enumerate(state.validators): if ( validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch ): increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow penalty_numerator = ( validator.effective_balance // increment * adjusted_total_slashing_balance ) penalty = penalty_numerator // total_balance * increment decrease_balance(state, ValidatorIndex(index), penalty) - name: process_slashings#altair sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessSlashings( spec: | def process_slashings(state: BeaconState) -> None: epoch = get_current_epoch(state) total_balance = get_total_active_balance(state) adjusted_total_slashing_balance = min( sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance ) for index, validator in enumerate(state.validators): if ( validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch ): increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow penalty_numerator = ( validator.effective_balance // increment * adjusted_total_slashing_balance ) penalty = penalty_numerator // total_balance * increment decrease_balance(state, ValidatorIndex(index), penalty) - name: process_slashings#bellatrix sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessSlashings( spec: | def process_slashings(state: BeaconState) -> None: epoch = get_current_epoch(state) total_balance = get_total_active_balance(state) adjusted_total_slashing_balance = min( sum(state.slashings) # [Modified in Bellatrix] * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX, total_balance, ) for index, validator in enumerate(state.validators): if ( validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch ): increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow penalty_numerator = ( validator.effective_balance // increment * adjusted_total_slashing_balance ) penalty = penalty_numerator // total_balance * increment decrease_balance(state, ValidatorIndex(index), penalty) - name: process_slashings#electra sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessSlashings( spec: | def process_slashings(state: BeaconState) -> None: epoch = get_current_epoch(state) total_balance = get_total_active_balance(state) adjusted_total_slashing_balance = min( sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX, total_balance ) increment = ( 
EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow ) penalty_per_effective_balance_increment = adjusted_total_slashing_balance // ( total_balance // increment ) for index, validator in enumerate(state.validators): if ( validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch ): effective_balance_increments = validator.effective_balance // increment # [Modified in Electra:EIP7251] penalty = penalty_per_effective_balance_increment * effective_balance_increments decrease_balance(state, ValidatorIndex(index), penalty) - name: process_slashings_reset sources: - file: beacon-chain/core/epoch/epoch_processing.go search: func ProcessSlashingsReset( spec: | def process_slashings_reset(state: BeaconState) -> None: next_epoch = Epoch(get_current_epoch(state) + 1) # Reset slashings state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0) - name: process_slot sources: - file: beacon-chain/core/transition/transition.go search: func ProcessSlot( spec: | def process_slot(state: BeaconState) -> None: # Cache state root previous_state_root = hash_tree_root(state) state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root # Cache latest block header state root if state.latest_block_header.state_root == Bytes32(): state.latest_block_header.state_root = previous_state_root # Cache block root previous_block_root = hash_tree_root(state.latest_block_header) state.block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root - name: process_slots sources: - file: beacon-chain/core/transition/transition.go search: func ProcessSlots( spec: | def process_slots(state: BeaconState, slot: Slot) -> None: assert state.slot < slot while state.slot < slot: process_slot(state) # Process epoch on the start slot of the next epoch if (state.slot + 1) % SLOTS_PER_EPOCH == 0: process_epoch(state) state.slot = Slot(state.slot + 1) - name: process_sync_aggregate sources: - file: beacon-chain/core/altair/block.go search: func ProcessSyncAggregate( spec: | def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None: # Verify sync committee aggregate signature signing over the previous slot block root committee_pubkeys = state.current_sync_committee.pubkeys participant_pubkeys = [ pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit ] previous_slot = max(state.slot, Slot(1)) - Slot(1) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot)) signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain) assert eth_fast_aggregate_verify( participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature ) # Compute participant and proposer rewards total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments) max_participant_rewards = Gwei( total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH ) participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE) proposer_reward = Gwei( participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) ) # Apply participant and proposer rewards all_pubkeys = [v.pubkey for v in state.validators] committee_indices = [ ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys ] for participant_index, participation_bit in zip( committee_indices, 
sync_aggregate.sync_committee_bits ): if participation_bit: increase_balance(state, participant_index, participant_reward) increase_balance(state, get_beacon_proposer_index(state), proposer_reward) else: decrease_balance(state, participant_index, participant_reward) - name: process_sync_committee_contributions sources: [] spec: | def process_sync_committee_contributions( block: BeaconBlock, contributions: Set[SyncCommitteeContribution] ) -> None: sync_aggregate = SyncAggregate() signatures = [] sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT for contribution in contributions: subcommittee_index = contribution.subcommittee_index for index, participated in enumerate(contribution.aggregation_bits): if participated: participant_index = sync_subcommittee_size * subcommittee_index + index sync_aggregate.sync_committee_bits[participant_index] = True signatures.append(contribution.signature) sync_aggregate.sync_committee_signature = bls.Aggregate(signatures) block.body.sync_aggregate = sync_aggregate - name: process_sync_committee_updates sources: - file: beacon-chain/core/altair/epoch_spec.go search: func ProcessSyncCommitteeUpdates( spec: | def process_sync_committee_updates(state: BeaconState) -> None: next_epoch = get_current_epoch(state) + Epoch(1) if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0: state.current_sync_committee = state.next_sync_committee state.next_sync_committee = get_next_sync_committee(state) - name: process_voluntary_exit#phase0 sources: - file: beacon-chain/core/blocks/exit.go search: func ProcessVoluntaryExits( spec: | def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: voluntary_exit = signed_voluntary_exit.message validator = state.validators[voluntary_exit.validator_index] # Verify the validator is active assert is_active_validator(validator, get_current_epoch(state)) # Verify exit has not been initiated assert validator.exit_epoch == FAR_FUTURE_EPOCH # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= voluntary_exit.epoch # Verify the validator has been active long enough assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD # Verify signature domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) signing_root = compute_signing_root(voluntary_exit, domain) assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) # Initiate exit initiate_validator_exit(state, voluntary_exit.validator_index) - name: process_voluntary_exit#deneb sources: - file: beacon-chain/core/blocks/exit.go search: func ProcessVoluntaryExits( spec: | def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: voluntary_exit = signed_voluntary_exit.message validator = state.validators[voluntary_exit.validator_index] # Verify the validator is active assert is_active_validator(validator, get_current_epoch(state)) # Verify exit has not been initiated assert validator.exit_epoch == FAR_FUTURE_EPOCH # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= voluntary_exit.epoch # Verify the validator has been active long enough assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD # Verify signature # [Modified in Deneb:EIP7044] domain = compute_domain( DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root ) signing_root = 
compute_signing_root(voluntary_exit, domain) assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) # Initiate exit initiate_validator_exit(state, voluntary_exit.validator_index) - name: process_voluntary_exit#electra sources: - file: beacon-chain/core/blocks/exit.go search: func ProcessVoluntaryExits( spec: | def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: voluntary_exit = signed_voluntary_exit.message validator = state.validators[voluntary_exit.validator_index] # Verify the validator is active assert is_active_validator(validator, get_current_epoch(state)) # Verify exit has not been initiated assert validator.exit_epoch == FAR_FUTURE_EPOCH # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= voluntary_exit.epoch # Verify the validator has been active long enough assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD # [New in Electra:EIP7251] # Only exit validator if it has no pending withdrawals in the queue assert get_pending_balance_to_withdraw(state, voluntary_exit.validator_index) == 0 # Verify signature domain = compute_domain( DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root ) signing_root = compute_signing_root(voluntary_exit, domain) assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) # Initiate exit initiate_validator_exit(state, voluntary_exit.validator_index) - name: process_withdrawal_request sources: - file: beacon-chain/core/electra/withdrawals.go search: func ProcessWithdrawalRequests( spec: | def process_withdrawal_request(state: BeaconState, withdrawal_request: WithdrawalRequest) -> None: amount = withdrawal_request.amount is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT # If partial withdrawal queue is full, only full exits are processed if ( len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT and not is_full_exit_request ): return validator_pubkeys = [v.pubkey for v in state.validators] # Verify pubkey exists request_pubkey = withdrawal_request.validator_pubkey if request_pubkey not in validator_pubkeys: return index = ValidatorIndex(validator_pubkeys.index(request_pubkey)) validator = state.validators[index] # Verify withdrawal credentials has_correct_credential = has_execution_withdrawal_credential(validator) is_correct_source_address = ( validator.withdrawal_credentials[12:] == withdrawal_request.source_address ) if not (has_correct_credential and is_correct_source_address): return # Verify the validator is active if not is_active_validator(validator, get_current_epoch(state)): return # Verify exit has not been initiated if validator.exit_epoch != FAR_FUTURE_EPOCH: return # Verify the validator has been active long enough if get_current_epoch(state) < validator.activation_epoch + SHARD_COMMITTEE_PERIOD: return pending_balance_to_withdraw = get_pending_balance_to_withdraw(state, index) if is_full_exit_request: # Only exit validator if it has no pending withdrawals in the queue if pending_balance_to_withdraw == 0: initiate_validator_exit(state, index) return has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE has_excess_balance = ( state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw ) # Only allow partial withdrawals with compounding withdrawal credentials if ( has_compounding_withdrawal_credential(validator) and 
has_sufficient_effective_balance and has_excess_balance ): to_withdraw = min( state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw, amount ) exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw) withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) state.pending_partial_withdrawals.append( PendingPartialWithdrawal( validator_index=index, amount=to_withdraw, withdrawable_epoch=withdrawable_epoch, ) ) - name: process_withdrawals#capella sources: - file: beacon-chain/core/blocks/withdrawals.go search: func ProcessWithdrawals( spec: | def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None: expected_withdrawals = get_expected_withdrawals(state) assert payload.withdrawals == expected_withdrawals for withdrawal in expected_withdrawals: decrease_balance(state, withdrawal.validator_index, withdrawal.amount) # Update the next withdrawal index if this block contained withdrawals if len(expected_withdrawals) != 0: latest_withdrawal = expected_withdrawals[-1] state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1) # Update the next validator index to start the next withdrawal sweep if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: # Next sweep starts after the latest withdrawal's validator index next_validator_index = ValidatorIndex( (expected_withdrawals[-1].validator_index + 1) % len(state.validators) ) state.next_withdrawal_validator_index = next_validator_index else: # Advance sweep by the max length of the sweep if there was not a full set of withdrawals next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP next_validator_index = ValidatorIndex(next_index % len(state.validators)) state.next_withdrawal_validator_index = next_validator_index - name: process_withdrawals#electra sources: - file: beacon-chain/core/blocks/withdrawals.go search: func ProcessWithdrawals( spec: | def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None: # [Modified in Electra:EIP7251] expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state) assert payload.withdrawals == expected_withdrawals for withdrawal in expected_withdrawals: decrease_balance(state, withdrawal.validator_index, withdrawal.amount) # [New in Electra:EIP7251] Update pending partial withdrawals state.pending_partial_withdrawals = state.pending_partial_withdrawals[ processed_partial_withdrawals_count: ] # Update the next withdrawal index if this block contained withdrawals if len(expected_withdrawals) != 0: latest_withdrawal = expected_withdrawals[-1] state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1) # Update the next validator index to start the next withdrawal sweep if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD: # Next sweep starts after the latest withdrawal's validator index next_validator_index = ValidatorIndex( (expected_withdrawals[-1].validator_index + 1) % len(state.validators) ) state.next_withdrawal_validator_index = next_validator_index else: # Advance sweep by the max length of the sweep if there was not a full set of withdrawals next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP next_validator_index = ValidatorIndex(next_index % len(state.validators)) state.next_withdrawal_validator_index = next_validator_index - name: queue_excess_active_balance sources: - file: beacon-chain/core/electra/validator.go search: func QueueExcessActiveBalance( spec: | def 
queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None: balance = state.balances[index] if balance > MIN_ACTIVATION_BALANCE: excess_balance = balance - MIN_ACTIVATION_BALANCE state.balances[index] = MIN_ACTIVATION_BALANCE validator = state.validators[index] # Use bls.G2_POINT_AT_INFINITY as a signature field placeholder # and GENESIS_SLOT to distinguish from a pending deposit request state.pending_deposits.append( PendingDeposit( pubkey=validator.pubkey, withdrawal_credentials=validator.withdrawal_credentials, amount=excess_balance, signature=bls.G2_POINT_AT_INFINITY, slot=GENESIS_SLOT, ) ) - name: recover_matrix sources: [] spec: | def recover_matrix( partial_matrix: Sequence[MatrixEntry], blob_count: uint64 ) -> Sequence[MatrixEntry]: """ Recover the full, flattened sequence of matrix entries. This helper demonstrates how to apply ``recover_cells_and_kzg_proofs``. The data structure for storing cells/proofs is implementation-dependent. """ matrix = [] for blob_index in range(blob_count): cell_indices = [e.column_index for e in partial_matrix if e.row_index == blob_index] cells = [e.cell for e in partial_matrix if e.row_index == blob_index] recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells) for cell_index, (cell, proof) in enumerate(zip(recovered_cells, recovered_proofs)): matrix.append( MatrixEntry( cell=cell, kzg_proof=proof, row_index=blob_index, column_index=cell_index, ) ) return matrix - name: saturating_sub sources: - file: consensus-types/primitives/slot.go search: func (s Slot) SafeSub( spec: | def saturating_sub(a: int, b: int) -> int: """ Computes a - b, saturating at numeric bounds. """ return a - b if a > b else 0 - name: seconds_to_milliseconds sources: [] spec: | def seconds_to_milliseconds(seconds: uint64) -> uint64: """ Convert seconds to milliseconds with overflow protection. Returns ``UINT64_MAX`` if the result would overflow. """ if seconds > UINT64_MAX // 1000: return UINT64_MAX return seconds * 1000 - name: set_or_append_list sources: [] spec: | def set_or_append_list(list: List, index: ValidatorIndex, value: Any) -> None: if index == len(list): list.append(value) else: list[index] = value - name: should_override_forkchoice_update sources: - file: beacon-chain/forkchoice/ro.go search: func (ro *ROForkChoice) ShouldOverrideFCU( spec: | def should_override_forkchoice_update(store: Store, head_root: Root) -> bool: head_block = store.blocks[head_root] parent_root = head_block.parent_root parent_block = store.blocks[parent_root] current_slot = get_current_slot(store) proposal_slot = head_block.slot + Slot(1) # Only re-org the head_block block if it arrived later than the attestation deadline. head_late = is_head_late(store, head_root) # Shuffling stable. shuffling_stable = is_shuffling_stable(proposal_slot) # FFG information of the new head_block will be competitive with the current head. ffg_competitive = is_ffg_competitive(store, head_root, parent_root) # Do not re-org if the chain is not finalizing with acceptable frequency. finalization_ok = is_finalization_ok(store, proposal_slot) # Only suppress the fork choice update if we are confident that we will propose the next block. parent_state_advanced = store.block_states[parent_root].copy() process_slots(parent_state_advanced, proposal_slot) proposer_index = get_beacon_proposer_index(parent_state_advanced) proposing_reorg_slot = validator_is_connected(proposer_index) # Single slot re-org. 
parent_slot_ok = parent_block.slot + 1 == head_block.slot proposing_on_time = is_proposing_on_time(store) # Note that this condition is different from `get_proposer_head` current_time_ok = head_block.slot == current_slot or ( proposal_slot == current_slot and proposing_on_time ) single_slot_reorg = parent_slot_ok and current_time_ok # Check the head weight only if the attestations from the head slot have already been applied. # Implementations may want to do this in different ways, e.g. by advancing # `store.time` early, or by counting queued attestations during the head block's slot. if current_slot > head_block.slot: head_weak = is_head_weak(store, head_root) parent_strong = is_parent_strong(store, parent_root) else: head_weak = True parent_strong = True return all( [ head_late, shuffling_stable, ffg_competitive, finalization_ok, proposing_reorg_slot, single_slot_reorg, head_weak, parent_strong, ] ) - name: slash_validator#phase0 sources: - file: beacon-chain/core/validators/validator.go search: func SlashValidator( spec: | def slash_validator( state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None ) -> None: """ Slash the validator with index ``slashed_index``. """ epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) validator = state.validators[slashed_index] validator.slashed = True validator.withdrawable_epoch = max( validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) ) state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance decrease_balance( state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT ) # Apply proposer and whistleblower rewards proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: whistleblower_index = proposer_index whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT) increase_balance(state, proposer_index, proposer_reward) increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) - name: slash_validator#altair sources: - file: beacon-chain/core/validators/validator.go search: func SlashValidator( spec: | def slash_validator( state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None ) -> None: """ Slash the validator with index ``slashed_index``. 
""" epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) validator = state.validators[slashed_index] validator.slashed = True validator.withdrawable_epoch = max( validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) ) state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance decrease_balance( state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR ) # Apply proposer and whistleblower rewards proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: whistleblower_index = proposer_index whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR) increase_balance(state, proposer_index, proposer_reward) increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) - name: slash_validator#bellatrix sources: - file: beacon-chain/core/validators/validator.go search: func SlashValidator( spec: | def slash_validator( state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None ) -> None: """ Slash the validator with index ``slashed_index``. """ epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) validator = state.validators[slashed_index] validator.slashed = True validator.withdrawable_epoch = max( validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) ) state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance # [Modified in Bellatrix] slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX decrease_balance(state, slashed_index, slashing_penalty) # Apply proposer and whistleblower rewards proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: whistleblower_index = proposer_index whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR) increase_balance(state, proposer_index, proposer_reward) increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) - name: slash_validator#electra sources: - file: beacon-chain/core/validators/validator.go search: func SlashValidator( spec: | def slash_validator( state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None ) -> None: """ Slash the validator with index ``slashed_index``. 
""" epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) validator = state.validators[slashed_index] validator.slashed = True validator.withdrawable_epoch = max( validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR) ) state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance # [Modified in Electra:EIP7251] slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA decrease_balance(state, slashed_index, slashing_penalty) # Apply proposer and whistleblower rewards proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: whistleblower_index = proposer_index # [Modified in Electra:EIP7251] whistleblower_reward = Gwei( validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA ) proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR) increase_balance(state, proposer_index, proposer_reward) increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) - name: state_transition sources: - file: beacon-chain/core/transition/transition.go search: func ExecuteStateTransition( spec: | def state_transition( state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool = True ) -> None: block = signed_block.message # Process slots (including those with no blocks) since block process_slots(state, block.slot) # Verify signature if validate_result: assert verify_block_signature(state, signed_block) # Process block process_block(state, block) # Verify state root if validate_result: assert block.state_root == hash_tree_root(state) - name: store_target_checkpoint_state sources: [] spec: | def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None: # Store target checkpoint state if not yet seen if target not in store.checkpoint_states: base_state = copy(store.block_states[target.root]) if base_state.slot < compute_start_slot_at_epoch(target.epoch): process_slots(base_state, compute_start_slot_at_epoch(target.epoch)) store.checkpoint_states[target] = base_state - name: switch_to_compounding_validator sources: - file: beacon-chain/core/electra/validator.go search: func SwitchToCompoundingValidator( spec: | def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None: validator = state.validators[index] validator.withdrawal_credentials = ( COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:] ) queue_excess_active_balance(state, index) - name: translate_participation sources: - file: beacon-chain/core/altair/upgrade.go search: func TranslateParticipation( spec: | def translate_participation( state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation] ) -> None: for attestation in pending_attestations: data = attestation.data inclusion_delay = attestation.inclusion_delay # Translate attestation inclusion info to flag indices participation_flag_indices = get_attestation_participation_flag_indices( state, data, inclusion_delay ) # Apply flags to all attesting validators epoch_participation = state.previous_epoch_participation for index in get_attesting_indices(state, attestation): for flag_index in participation_flag_indices: epoch_participation[index] = add_flag(epoch_participation[index], flag_index) - name: update_checkpoints sources: - file: beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go search: func (f *ForkChoice) updateCheckpoints( spec: | def update_checkpoints( store: Store, justified_checkpoint: Checkpoint, 
finalized_checkpoint: Checkpoint ) -> None: """ Update checkpoints in store if necessary """ # Update justified checkpoint if justified_checkpoint.epoch > store.justified_checkpoint.epoch: store.justified_checkpoint = justified_checkpoint # Update finalized checkpoint if finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: store.finalized_checkpoint = finalized_checkpoint - name: update_latest_messages sources: [] spec: | def update_latest_messages( store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation ) -> None: target = attestation.data.target beacon_block_root = attestation.data.beacon_block_root non_equivocating_attesting_indices = [ i for i in attesting_indices if i not in store.equivocating_indices ] for i in non_equivocating_attesting_indices: if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch: store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root) - name: update_unrealized_checkpoints sources: - file: beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification.go search: func (f *ForkChoice) updateUnrealizedCheckpoints( spec: | def update_unrealized_checkpoints( store: Store, unrealized_justified_checkpoint: Checkpoint, unrealized_finalized_checkpoint: Checkpoint, ) -> None: """ Update unrealized checkpoints in store if necessary """ # Update unrealized justified checkpoint if unrealized_justified_checkpoint.epoch > store.unrealized_justified_checkpoint.epoch: store.unrealized_justified_checkpoint = unrealized_justified_checkpoint # Update unrealized finalized checkpoint if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch: store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint - name: upgrade_lc_bootstrap_to_capella sources: [] spec: | def upgrade_lc_bootstrap_to_capella(pre: altair.LightClientBootstrap) -> LightClientBootstrap: return LightClientBootstrap( header=upgrade_lc_header_to_capella(pre.header), current_sync_committee=pre.current_sync_committee, current_sync_committee_branch=pre.current_sync_committee_branch, ) - name: upgrade_lc_bootstrap_to_deneb sources: [] spec: | def upgrade_lc_bootstrap_to_deneb(pre: capella.LightClientBootstrap) -> LightClientBootstrap: return LightClientBootstrap( header=upgrade_lc_header_to_deneb(pre.header), current_sync_committee=pre.current_sync_committee, current_sync_committee_branch=pre.current_sync_committee_branch, ) - name: upgrade_lc_bootstrap_to_electra sources: [] spec: | def upgrade_lc_bootstrap_to_electra(pre: deneb.LightClientBootstrap) -> LightClientBootstrap: return LightClientBootstrap( header=upgrade_lc_header_to_electra(pre.header), current_sync_committee=pre.current_sync_committee, current_sync_committee_branch=normalize_merkle_branch( pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA ), ) - name: upgrade_lc_finality_update_to_capella sources: [] spec: | def upgrade_lc_finality_update_to_capella( pre: altair.LightClientFinalityUpdate, ) -> LightClientFinalityUpdate: return LightClientFinalityUpdate( attested_header=upgrade_lc_header_to_capella(pre.attested_header), finalized_header=upgrade_lc_header_to_capella(pre.finalized_header), finality_branch=pre.finality_branch, sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_finality_update_to_deneb sources: [] spec: | def upgrade_lc_finality_update_to_deneb( pre: capella.LightClientFinalityUpdate, ) -> LightClientFinalityUpdate: return 
LightClientFinalityUpdate( attested_header=upgrade_lc_header_to_deneb(pre.attested_header), finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), finality_branch=pre.finality_branch, sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_finality_update_to_electra sources: [] spec: | def upgrade_lc_finality_update_to_electra( pre: deneb.LightClientFinalityUpdate, ) -> LightClientFinalityUpdate: return LightClientFinalityUpdate( attested_header=upgrade_lc_header_to_electra(pre.attested_header), finalized_header=upgrade_lc_header_to_electra(pre.finalized_header), finality_branch=normalize_merkle_branch(pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA), sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_header_to_capella sources: [] spec: | def upgrade_lc_header_to_capella(pre: altair.LightClientHeader) -> LightClientHeader: return LightClientHeader( beacon=pre.beacon, execution=ExecutionPayloadHeader(), execution_branch=ExecutionBranch(), ) - name: upgrade_lc_header_to_deneb sources: [] spec: | def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHeader: return LightClientHeader( beacon=pre.beacon, execution=ExecutionPayloadHeader( parent_hash=pre.execution.parent_hash, fee_recipient=pre.execution.fee_recipient, state_root=pre.execution.state_root, receipts_root=pre.execution.receipts_root, logs_bloom=pre.execution.logs_bloom, prev_randao=pre.execution.prev_randao, block_number=pre.execution.block_number, gas_limit=pre.execution.gas_limit, gas_used=pre.execution.gas_used, timestamp=pre.execution.timestamp, extra_data=pre.execution.extra_data, base_fee_per_gas=pre.execution.base_fee_per_gas, block_hash=pre.execution.block_hash, transactions_root=pre.execution.transactions_root, withdrawals_root=pre.execution.withdrawals_root, # [New in Deneb:EIP4844] blob_gas_used=uint64(0), # [New in Deneb:EIP4844] excess_blob_gas=uint64(0), ), execution_branch=pre.execution_branch, ) - name: upgrade_lc_header_to_electra sources: [] spec: | def upgrade_lc_header_to_electra(pre: deneb.LightClientHeader) -> LightClientHeader: return LightClientHeader( beacon=pre.beacon, execution=pre.execution, execution_branch=pre.execution_branch, ) - name: upgrade_lc_optimistic_update_to_capella sources: [] spec: | def upgrade_lc_optimistic_update_to_capella( pre: altair.LightClientOptimisticUpdate, ) -> LightClientOptimisticUpdate: return LightClientOptimisticUpdate( attested_header=upgrade_lc_header_to_capella(pre.attested_header), sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_optimistic_update_to_deneb sources: [] spec: | def upgrade_lc_optimistic_update_to_deneb( pre: capella.LightClientOptimisticUpdate, ) -> LightClientOptimisticUpdate: return LightClientOptimisticUpdate( attested_header=upgrade_lc_header_to_deneb(pre.attested_header), sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_optimistic_update_to_electra sources: [] spec: | def upgrade_lc_optimistic_update_to_electra( pre: deneb.LightClientOptimisticUpdate, ) -> LightClientOptimisticUpdate: return LightClientOptimisticUpdate( attested_header=upgrade_lc_header_to_electra(pre.attested_header), sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_store_to_capella sources: [] spec: | def upgrade_lc_store_to_capella(pre: altair.LightClientStore) -> LightClientStore: if pre.best_valid_update is None: best_valid_update = None else: 
best_valid_update = upgrade_lc_update_to_capella(pre.best_valid_update) return LightClientStore( finalized_header=upgrade_lc_header_to_capella(pre.finalized_header), current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, best_valid_update=best_valid_update, optimistic_header=upgrade_lc_header_to_capella(pre.optimistic_header), previous_max_active_participants=pre.previous_max_active_participants, current_max_active_participants=pre.current_max_active_participants, ) - name: upgrade_lc_store_to_deneb sources: [] spec: | def upgrade_lc_store_to_deneb(pre: capella.LightClientStore) -> LightClientStore: if pre.best_valid_update is None: best_valid_update = None else: best_valid_update = upgrade_lc_update_to_deneb(pre.best_valid_update) return LightClientStore( finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, best_valid_update=best_valid_update, optimistic_header=upgrade_lc_header_to_deneb(pre.optimistic_header), previous_max_active_participants=pre.previous_max_active_participants, current_max_active_participants=pre.current_max_active_participants, ) - name: upgrade_lc_store_to_electra sources: [] spec: | def upgrade_lc_store_to_electra(pre: deneb.LightClientStore) -> LightClientStore: if pre.best_valid_update is None: best_valid_update = None else: best_valid_update = upgrade_lc_update_to_electra(pre.best_valid_update) return LightClientStore( finalized_header=upgrade_lc_header_to_electra(pre.finalized_header), current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, best_valid_update=best_valid_update, optimistic_header=upgrade_lc_header_to_electra(pre.optimistic_header), previous_max_active_participants=pre.previous_max_active_participants, current_max_active_participants=pre.current_max_active_participants, ) - name: upgrade_lc_update_to_capella sources: [] spec: | def upgrade_lc_update_to_capella(pre: altair.LightClientUpdate) -> LightClientUpdate: return LightClientUpdate( attested_header=upgrade_lc_header_to_capella(pre.attested_header), next_sync_committee=pre.next_sync_committee, next_sync_committee_branch=pre.next_sync_committee_branch, finalized_header=upgrade_lc_header_to_capella(pre.finalized_header), finality_branch=pre.finality_branch, sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_update_to_deneb sources: [] spec: | def upgrade_lc_update_to_deneb(pre: capella.LightClientUpdate) -> LightClientUpdate: return LightClientUpdate( attested_header=upgrade_lc_header_to_deneb(pre.attested_header), next_sync_committee=pre.next_sync_committee, next_sync_committee_branch=pre.next_sync_committee_branch, finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), finality_branch=pre.finality_branch, sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_lc_update_to_electra sources: [] spec: | def upgrade_lc_update_to_electra(pre: deneb.LightClientUpdate) -> LightClientUpdate: return LightClientUpdate( attested_header=upgrade_lc_header_to_electra(pre.attested_header), next_sync_committee=pre.next_sync_committee, next_sync_committee_branch=normalize_merkle_branch( pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA ), finalized_header=upgrade_lc_header_to_electra(pre.finalized_header), finality_branch=normalize_merkle_branch(pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA), 
sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) - name: upgrade_to_altair sources: - file: beacon-chain/core/altair/upgrade.go search: func UpgradeToAltair( spec: | def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState: epoch = phase0.get_current_epoch(pre) post = BeaconState( genesis_time=pre.genesis_time, genesis_validators_root=pre.genesis_validators_root, slot=pre.slot, fork=Fork( previous_version=pre.fork.current_version, current_version=ALTAIR_FORK_VERSION, epoch=epoch, ), latest_block_header=pre.latest_block_header, block_roots=pre.block_roots, state_roots=pre.state_roots, historical_roots=pre.historical_roots, eth1_data=pre.eth1_data, eth1_data_votes=pre.eth1_data_votes, eth1_deposit_index=pre.eth1_deposit_index, validators=pre.validators, balances=pre.balances, randao_mixes=pre.randao_mixes, slashings=pre.slashings, previous_epoch_participation=[ ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators)) ], current_epoch_participation=[ ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators)) ], justification_bits=pre.justification_bits, previous_justified_checkpoint=pre.previous_justified_checkpoint, current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, inactivity_scores=[uint64(0) for _ in range(len(pre.validators))], ) # Fill in previous epoch participation from the pre state's pending attestations translate_participation(post, pre.previous_epoch_attestations) # Fill in sync committees # Note: A duplicate committee is assigned for the current and next committee at the fork boundary post.current_sync_committee = get_next_sync_committee(post) post.next_sync_committee = get_next_sync_committee(post) return post - name: upgrade_to_bellatrix sources: - file: beacon-chain/core/execution/upgrade.go search: func UpgradeToBellatrix( spec: | def upgrade_to_bellatrix(pre: altair.BeaconState) -> BeaconState: epoch = altair.get_current_epoch(pre) post = BeaconState( genesis_time=pre.genesis_time, genesis_validators_root=pre.genesis_validators_root, slot=pre.slot, fork=Fork( previous_version=pre.fork.current_version, # [New in Bellatrix] current_version=BELLATRIX_FORK_VERSION, epoch=epoch, ), latest_block_header=pre.latest_block_header, block_roots=pre.block_roots, state_roots=pre.state_roots, historical_roots=pre.historical_roots, eth1_data=pre.eth1_data, eth1_data_votes=pre.eth1_data_votes, eth1_deposit_index=pre.eth1_deposit_index, validators=pre.validators, balances=pre.balances, randao_mixes=pre.randao_mixes, slashings=pre.slashings, previous_epoch_participation=pre.previous_epoch_participation, current_epoch_participation=pre.current_epoch_participation, justification_bits=pre.justification_bits, previous_justified_checkpoint=pre.previous_justified_checkpoint, current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, inactivity_scores=pre.inactivity_scores, current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, # [New in Bellatrix] latest_execution_payload_header=ExecutionPayloadHeader(), ) return post - name: upgrade_to_capella sources: - file: beacon-chain/core/capella/upgrade.go search: func UpgradeToCapella( spec: | def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState: epoch = bellatrix.get_current_epoch(pre) latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=pre.latest_execution_payload_header.parent_hash, 
fee_recipient=pre.latest_execution_payload_header.fee_recipient, state_root=pre.latest_execution_payload_header.state_root, receipts_root=pre.latest_execution_payload_header.receipts_root, logs_bloom=pre.latest_execution_payload_header.logs_bloom, prev_randao=pre.latest_execution_payload_header.prev_randao, block_number=pre.latest_execution_payload_header.block_number, gas_limit=pre.latest_execution_payload_header.gas_limit, gas_used=pre.latest_execution_payload_header.gas_used, timestamp=pre.latest_execution_payload_header.timestamp, extra_data=pre.latest_execution_payload_header.extra_data, base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, block_hash=pre.latest_execution_payload_header.block_hash, transactions_root=pre.latest_execution_payload_header.transactions_root, # [New in Capella] withdrawals_root=Root(), ) post = BeaconState( genesis_time=pre.genesis_time, genesis_validators_root=pre.genesis_validators_root, slot=pre.slot, fork=Fork( previous_version=pre.fork.current_version, current_version=CAPELLA_FORK_VERSION, epoch=epoch, ), latest_block_header=pre.latest_block_header, block_roots=pre.block_roots, state_roots=pre.state_roots, historical_roots=pre.historical_roots, eth1_data=pre.eth1_data, eth1_data_votes=pre.eth1_data_votes, eth1_deposit_index=pre.eth1_deposit_index, validators=pre.validators, balances=pre.balances, randao_mixes=pre.randao_mixes, slashings=pre.slashings, previous_epoch_participation=pre.previous_epoch_participation, current_epoch_participation=pre.current_epoch_participation, justification_bits=pre.justification_bits, previous_justified_checkpoint=pre.previous_justified_checkpoint, current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, inactivity_scores=pre.inactivity_scores, current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, latest_execution_payload_header=latest_execution_payload_header, # [New in Capella] next_withdrawal_index=WithdrawalIndex(0), # [New in Capella] next_withdrawal_validator_index=ValidatorIndex(0), # [New in Capella] historical_summaries=List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]([]), ) return post - name: upgrade_to_deneb sources: - file: beacon-chain/core/deneb/upgrade.go search: func UpgradeToDeneb( spec: | def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: epoch = capella.get_current_epoch(pre) latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=pre.latest_execution_payload_header.parent_hash, fee_recipient=pre.latest_execution_payload_header.fee_recipient, state_root=pre.latest_execution_payload_header.state_root, receipts_root=pre.latest_execution_payload_header.receipts_root, logs_bloom=pre.latest_execution_payload_header.logs_bloom, prev_randao=pre.latest_execution_payload_header.prev_randao, block_number=pre.latest_execution_payload_header.block_number, gas_limit=pre.latest_execution_payload_header.gas_limit, gas_used=pre.latest_execution_payload_header.gas_used, timestamp=pre.latest_execution_payload_header.timestamp, extra_data=pre.latest_execution_payload_header.extra_data, base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, block_hash=pre.latest_execution_payload_header.block_hash, transactions_root=pre.latest_execution_payload_header.transactions_root, withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, # [New in Deneb:EIP4844] blob_gas_used=uint64(0), # [New in Deneb:EIP4844] excess_blob_gas=uint64(0), ) post = BeaconState( 
genesis_time=pre.genesis_time, genesis_validators_root=pre.genesis_validators_root, slot=pre.slot, fork=Fork( previous_version=pre.fork.current_version, # [Modified in Deneb] current_version=DENEB_FORK_VERSION, epoch=epoch, ), latest_block_header=pre.latest_block_header, block_roots=pre.block_roots, state_roots=pre.state_roots, historical_roots=pre.historical_roots, eth1_data=pre.eth1_data, eth1_data_votes=pre.eth1_data_votes, eth1_deposit_index=pre.eth1_deposit_index, validators=pre.validators, balances=pre.balances, randao_mixes=pre.randao_mixes, slashings=pre.slashings, previous_epoch_participation=pre.previous_epoch_participation, current_epoch_participation=pre.current_epoch_participation, justification_bits=pre.justification_bits, previous_justified_checkpoint=pre.previous_justified_checkpoint, current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, inactivity_scores=pre.inactivity_scores, current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, # [Modified in Deneb:EIP4844] latest_execution_payload_header=latest_execution_payload_header, next_withdrawal_index=pre.next_withdrawal_index, next_withdrawal_validator_index=pre.next_withdrawal_validator_index, historical_summaries=pre.historical_summaries, ) return post - name: upgrade_to_electra sources: - file: beacon-chain/core/electra/upgrade.go search: func UpgradeToElectra( spec: | def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState: epoch = deneb.get_current_epoch(pre) earliest_exit_epoch = compute_activation_exit_epoch(get_current_epoch(pre)) for validator in pre.validators: if validator.exit_epoch != FAR_FUTURE_EPOCH: if validator.exit_epoch > earliest_exit_epoch: earliest_exit_epoch = validator.exit_epoch earliest_exit_epoch += Epoch(1) post = BeaconState( genesis_time=pre.genesis_time, genesis_validators_root=pre.genesis_validators_root, slot=pre.slot, fork=Fork( previous_version=pre.fork.current_version, # [Modified in Electra] current_version=ELECTRA_FORK_VERSION, epoch=epoch, ), latest_block_header=pre.latest_block_header, block_roots=pre.block_roots, state_roots=pre.state_roots, historical_roots=pre.historical_roots, eth1_data=pre.eth1_data, eth1_data_votes=pre.eth1_data_votes, eth1_deposit_index=pre.eth1_deposit_index, validators=pre.validators, balances=pre.balances, randao_mixes=pre.randao_mixes, slashings=pre.slashings, previous_epoch_participation=pre.previous_epoch_participation, current_epoch_participation=pre.current_epoch_participation, justification_bits=pre.justification_bits, previous_justified_checkpoint=pre.previous_justified_checkpoint, current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, inactivity_scores=pre.inactivity_scores, current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, latest_execution_payload_header=pre.latest_execution_payload_header, next_withdrawal_index=pre.next_withdrawal_index, next_withdrawal_validator_index=pre.next_withdrawal_validator_index, historical_summaries=pre.historical_summaries, # [New in Electra:EIP6110] deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX, # [New in Electra:EIP7251] deposit_balance_to_consume=0, # [New in Electra:EIP7251] exit_balance_to_consume=0, # [New in Electra:EIP7251] earliest_exit_epoch=earliest_exit_epoch, # [New in Electra:EIP7251] consolidation_balance_to_consume=0, # [New in Electra:EIP7251] 
earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)), # [New in Electra:EIP7251] pending_deposits=[], # [New in Electra:EIP7251] pending_partial_withdrawals=[], # [New in Electra:EIP7251] pending_consolidations=[], ) post.exit_balance_to_consume = get_activation_exit_churn_limit(post) post.consolidation_balance_to_consume = get_consolidation_churn_limit(post) # [New in Electra:EIP7251] # add validators that are not yet active to pending balance deposits pre_activation = sorted( [ index for index, validator in enumerate(post.validators) if validator.activation_epoch == FAR_FUTURE_EPOCH ], key=lambda index: (post.validators[index].activation_eligibility_epoch, index), ) for index in pre_activation: balance = post.balances[index] post.balances[index] = 0 validator = post.validators[index] validator.effective_balance = 0 validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH # Use bls.G2_POINT_AT_INFINITY as a signature field placeholder # and GENESIS_SLOT to distinguish from a pending deposit request post.pending_deposits.append( PendingDeposit( pubkey=validator.pubkey, withdrawal_credentials=validator.withdrawal_credentials, amount=balance, signature=bls.G2_POINT_AT_INFINITY, slot=GENESIS_SLOT, ) ) # Ensure early adopters of compounding credentials go through the activation churn for index, validator in enumerate(post.validators): if has_compounding_withdrawal_credential(validator): queue_excess_active_balance(post, ValidatorIndex(index)) return post - name: upgrade_to_fulu sources: - file: beacon-chain/core/fulu/upgrade.go search: func UpgradeToFulu( spec: | def upgrade_to_fulu(pre: electra.BeaconState) -> BeaconState: epoch = electra.get_current_epoch(pre) post = BeaconState( genesis_time=pre.genesis_time, genesis_validators_root=pre.genesis_validators_root, slot=pre.slot, fork=Fork( previous_version=pre.fork.current_version, # [Modified in Fulu] current_version=FULU_FORK_VERSION, epoch=epoch, ), latest_block_header=pre.latest_block_header, block_roots=pre.block_roots, state_roots=pre.state_roots, historical_roots=pre.historical_roots, eth1_data=pre.eth1_data, eth1_data_votes=pre.eth1_data_votes, eth1_deposit_index=pre.eth1_deposit_index, validators=pre.validators, balances=pre.balances, randao_mixes=pre.randao_mixes, slashings=pre.slashings, previous_epoch_participation=pre.previous_epoch_participation, current_epoch_participation=pre.current_epoch_participation, justification_bits=pre.justification_bits, previous_justified_checkpoint=pre.previous_justified_checkpoint, current_justified_checkpoint=pre.current_justified_checkpoint, finalized_checkpoint=pre.finalized_checkpoint, inactivity_scores=pre.inactivity_scores, current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, latest_execution_payload_header=pre.latest_execution_payload_header, next_withdrawal_index=pre.next_withdrawal_index, next_withdrawal_validator_index=pre.next_withdrawal_validator_index, historical_summaries=pre.historical_summaries, deposit_requests_start_index=pre.deposit_requests_start_index, deposit_balance_to_consume=pre.deposit_balance_to_consume, exit_balance_to_consume=pre.exit_balance_to_consume, earliest_exit_epoch=pre.earliest_exit_epoch, consolidation_balance_to_consume=pre.consolidation_balance_to_consume, earliest_consolidation_epoch=pre.earliest_consolidation_epoch, pending_deposits=pre.pending_deposits, pending_partial_withdrawals=pre.pending_partial_withdrawals, pending_consolidations=pre.pending_consolidations, # [New in 
Fulu:EIP7917] proposer_lookahead=initialize_proposer_lookahead(pre), ) return post - name: validate_light_client_update sources: [] spec: | def validate_light_client_update( store: LightClientStore, update: LightClientUpdate, current_slot: Slot, genesis_validators_root: Root, ) -> None: # Verify sync committee has sufficient participants sync_aggregate = update.sync_aggregate assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS # Verify update does not skip a sync committee period assert is_valid_light_client_header(update.attested_header) update_attested_slot = update.attested_header.beacon.slot update_finalized_slot = update.finalized_header.beacon.slot assert current_slot >= update.signature_slot > update_attested_slot >= update_finalized_slot store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot) update_signature_period = compute_sync_committee_period_at_slot(update.signature_slot) if is_next_sync_committee_known(store): assert update_signature_period in (store_period, store_period + 1) else: assert update_signature_period == store_period # Verify update is relevant update_attested_period = compute_sync_committee_period_at_slot(update_attested_slot) update_has_next_sync_committee = not is_next_sync_committee_known(store) and ( is_sync_committee_update(update) and update_attested_period == store_period ) assert ( update_attested_slot > store.finalized_header.beacon.slot or update_has_next_sync_committee ) # Verify that the `finality_branch`, if present, confirms `finalized_header` # to match the finalized checkpoint root saved in the state of `attested_header`. # Note that the genesis finalized checkpoint root is represented as a zero hash. if not is_finality_update(update): assert update.finalized_header == LightClientHeader() else: if update_finalized_slot == GENESIS_SLOT: assert update.finalized_header == LightClientHeader() finalized_root = Bytes32() else: assert is_valid_light_client_header(update.finalized_header) finalized_root = hash_tree_root(update.finalized_header.beacon) assert is_valid_normalized_merkle_branch( leaf=finalized_root, branch=update.finality_branch, gindex=finalized_root_gindex_at_slot(update.attested_header.beacon.slot), root=update.attested_header.beacon.state_root, ) # Verify that the `next_sync_committee`, if present, actually is the next sync committee saved in the # state of the `attested_header` if not is_sync_committee_update(update): assert update.next_sync_committee == SyncCommittee() else: if update_attested_period == store_period and is_next_sync_committee_known(store): assert update.next_sync_committee == store.next_sync_committee assert is_valid_normalized_merkle_branch( leaf=hash_tree_root(update.next_sync_committee), branch=update.next_sync_committee_branch, gindex=next_sync_committee_gindex_at_slot(update.attested_header.beacon.slot), root=update.attested_header.beacon.state_root, ) # Verify sync committee aggregate signature if update_signature_period == store_period: sync_committee = store.current_sync_committee else: sync_committee = store.next_sync_committee participant_pubkeys = [ pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys) if bit ] fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1) fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot)) domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root) signing_root = compute_signing_root(update.attested_header.beacon, 
domain) assert bls.FastAggregateVerify( participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature ) - name: validate_merge_block sources: - file: beacon-chain/blockchain/pow_block.go search: func (s *Service) validateMergeBlock( spec: | def validate_merge_block(block: BeaconBlock) -> None: """ Check the parent PoW block of execution payload is a valid terminal PoW block. Note: Unavailable PoW block(s) may later become available, and a client software MAY delay a call to ``validate_merge_block`` until the PoW block(s) become available. """ if TERMINAL_BLOCK_HASH != Hash32(): # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached. assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH return pow_block = get_pow_block(block.body.execution_payload.parent_hash) # Check if `pow_block` is available assert pow_block is not None pow_parent = get_pow_block(pow_block.parent_hash) # Check if `pow_parent` is available assert pow_parent is not None # Check if `pow_block` is a valid terminal PoW block assert is_valid_terminal_pow_block(pow_block, pow_parent) - name: validate_on_attestation sources: [] spec: | def validate_on_attestation(store: Store, attestation: Attestation, is_from_block: bool) -> None: target = attestation.data.target # If the given attestation is not from a beacon block message, we have to check the target epoch scope. if not is_from_block: validate_target_epoch_against_current_time(store, attestation) # Check that the epoch number and slot number are matching assert target.epoch == compute_epoch_at_slot(attestation.data.slot) # Attestation target must be for a known block. If target block is unknown, delay consideration until block is found assert target.root in store.blocks # Attestations must be for a known block. If block is unknown, delay consideration until the block is found assert attestation.data.beacon_block_root in store.blocks # Attestations must not be for blocks in the future. If not, the attestation should not be considered assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot # LMD vote must be consistent with FFG vote target assert target.root == get_checkpoint_block( store, attestation.data.beacon_block_root, target.epoch ) # Attestations can only affect the fork choice of subsequent slots. # Delay consideration in the fork choice until their slot is in the past. 
assert get_current_slot(store) >= attestation.data.slot + 1 - name: validate_target_epoch_against_current_time sources: [] spec: | def validate_target_epoch_against_current_time(store: Store, attestation: Attestation) -> None: target = attestation.data.target # Attestations must be from the current or previous epoch current_epoch = get_current_store_epoch(store) # Use GENESIS_EPOCH for previous when genesis to avoid underflow previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH # If attestation target is from a future epoch, delay consideration until the epoch arrives assert target.epoch in [current_epoch, previous_epoch] - name: verify_blob_sidecar_inclusion_proof sources: - file: consensus-types/blocks/kzg.go search: func VerifyKZGInclusionProof( spec: | def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool: gindex = get_subtree_index( get_generalized_index(BeaconBlockBody, "blob_kzg_commitments", blob_sidecar.index) ) return is_valid_merkle_branch( leaf=blob_sidecar.kzg_commitment.hash_tree_root(), branch=blob_sidecar.kzg_commitment_inclusion_proof, depth=KZG_COMMITMENT_INCLUSION_PROOF_DEPTH, index=gindex, root=blob_sidecar.signed_block_header.message.body_root, ) - name: verify_block_signature sources: - file: beacon-chain/core/blocks/signature.go search: func VerifyBlockSignature( spec: | def verify_block_signature(state: BeaconState, signed_block: SignedBeaconBlock) -> bool: proposer = state.validators[signed_block.message.proposer_index] signing_root = compute_signing_root( signed_block.message, get_domain(state, DOMAIN_BEACON_PROPOSER) ) return bls.Verify(proposer.pubkey, signing_root, signed_block.signature) - name: verify_data_column_sidecar sources: - file: beacon-chain/core/peerdas/p2p_interface.go search: func VerifyDataColumnSidecar( spec: | def verify_data_column_sidecar(sidecar: DataColumnSidecar) -> bool: """ Verify if the data column sidecar is valid. """ # The sidecar index must be within the valid range if sidecar.index >= NUMBER_OF_COLUMNS: return False # A sidecar for zero blobs is invalid if len(sidecar.kzg_commitments) == 0: return False # Check that the sidecar respects the blob limit epoch = compute_epoch_at_slot(sidecar.signed_block_header.message.slot) if len(sidecar.kzg_commitments) > get_blob_parameters(epoch).max_blobs_per_block: return False # The column length must be equal to the number of commitments/proofs if len(sidecar.column) != len(sidecar.kzg_commitments) or len(sidecar.column) != len( sidecar.kzg_proofs ): return False return True - name: verify_data_column_sidecar_inclusion_proof sources: - file: beacon-chain/core/peerdas/p2p_interface.go search: func VerifyDataColumnSidecarInclusionProof( spec: | def verify_data_column_sidecar_inclusion_proof(sidecar: DataColumnSidecar) -> bool: """ Verify if the given KZG commitments included in the given beacon block. """ return is_valid_merkle_branch( leaf=hash_tree_root(sidecar.kzg_commitments), branch=sidecar.kzg_commitments_inclusion_proof, depth=KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH, index=get_subtree_index(get_generalized_index(BeaconBlockBody, "blob_kzg_commitments")), root=sidecar.signed_block_header.message.body_root, ) - name: verify_data_column_sidecar_kzg_proofs sources: - file: beacon-chain/core/peerdas/p2p_interface.go search: func VerifyDataColumnsSidecarKZGProofs( spec: | def verify_data_column_sidecar_kzg_proofs(sidecar: DataColumnSidecar) -> bool: """ Verify if the KZG proofs are correct. 
""" # The column index also represents the cell index cell_indices = [CellIndex(sidecar.index)] * len(sidecar.column) # Batch verify that the cells match the corresponding commitments and proofs return verify_cell_kzg_proof_batch( commitments_bytes=sidecar.kzg_commitments, cell_indices=cell_indices, cells=sidecar.column, proofs_bytes=sidecar.kzg_proofs, ) - name: voting_period_start_time sources: - file: time/slots/slottime.go search: func VotingPeriodStartTime( spec: | def voting_period_start_time(state: BeaconState) -> uint64: eth1_voting_period_start_slot = Slot( state.slot - state.slot % (EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH) ) return compute_time_at_slot(state, eth1_voting_period_start_slot) - name: weigh_justification_and_finalization sources: - file: beacon-chain/core/epoch/precompute/justification_finalization.go search: func weighJustificationAndFinalization( spec: | def weigh_justification_and_finalization( state: BeaconState, total_active_balance: Gwei, previous_epoch_target_balance: Gwei, current_epoch_target_balance: Gwei, ) -> None: previous_epoch = get_previous_epoch(state) current_epoch = get_current_epoch(state) old_previous_justified_checkpoint = state.previous_justified_checkpoint old_current_justified_checkpoint = state.current_justified_checkpoint # Process justifications state.previous_justified_checkpoint = state.current_justified_checkpoint state.justification_bits[1:] = state.justification_bits[: JUSTIFICATION_BITS_LENGTH - 1] state.justification_bits[0] = 0b0 if previous_epoch_target_balance * 3 >= total_active_balance * 2: state.current_justified_checkpoint = Checkpoint( epoch=previous_epoch, root=get_block_root(state, previous_epoch) ) state.justification_bits[1] = 0b1 if current_epoch_target_balance * 3 >= total_active_balance * 2: state.current_justified_checkpoint = Checkpoint( epoch=current_epoch, root=get_block_root(state, current_epoch) ) state.justification_bits[0] = 0b1 # Process finalizations bits = state.justification_bits # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch: state.finalized_checkpoint = old_previous_justified_checkpoint # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch: state.finalized_checkpoint = old_previous_justified_checkpoint # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch: state.finalized_checkpoint = old_current_justified_checkpoint # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch: state.finalized_checkpoint = old_current_justified_checkpoint - name: xor sources: [] spec: | def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32: """ Return the exclusive-or of two 32-byte strings. """ return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2))