Run linter on code in markdown files (#4359)
https://github.com/ethereum/consensus-specs
@@ -40,6 +40,7 @@ lint = [
     "codespell==2.4.1",
     "mdformat-gfm-alerts==1.0.2",
     "mdformat-gfm==0.4.1",
+    "mdformat-ruff==0.1.3",
     "mdformat-toc==0.3.0",
     "mdformat==0.7.22",
     "mypy==1.16.0",
@@ -145,9 +145,13 @@ class BeaconBlockBody(sharding.BeaconBlockBody):
 class BeaconState(sharding.BeaconState):
     # Future derived secrets already exposed; contains the indices of the exposed validator
     # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
-    exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH],
-                                    EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]
-    custody_chunk_challenge_records: List[CustodyChunkChallengeRecord, MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS]
+    exposed_derived_secrets: Vector[
+        List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH],
+        EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS,
+    ]
+    custody_chunk_challenge_records: List[
+        CustodyChunkChallengeRecord, MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS
+    ]
     custody_chunk_challenge_index: uint64
 ```

@@ -264,7 +268,7 @@ def legendre_bit(a: int, q: int) -> int:
         return legendre_bit(a % q, q)
     if a == 0:
         return 0
-    assert(q > a > 0 and q % 2 == 1)
+    assert q > a > 0 and q % 2 == 1
     t = 1
     n = q
     while a != 0:

@@ -291,10 +295,11 @@ one legendre bit.

 ```python
 def get_custody_atoms(bytez: bytes) -> Sequence[bytes]:
     length_remainder = len(bytez) % BYTES_PER_CUSTODY_ATOM
-    bytez += b'\x00' * ((BYTES_PER_CUSTODY_ATOM - length_remainder) % BYTES_PER_CUSTODY_ATOM)  # right-padding
+    bytez += b"\x00" * (
+        (BYTES_PER_CUSTODY_ATOM - length_remainder) % BYTES_PER_CUSTODY_ATOM
+    )  # right-padding
     return [
-        bytez[i:i + BYTES_PER_CUSTODY_ATOM]
-        for i in range(0, len(bytez), BYTES_PER_CUSTODY_ATOM)
+        bytez[i : i + BYTES_PER_CUSTODY_ATOM] for i in range(0, len(bytez), BYTES_PER_CUSTODY_ATOM)
     ]
 ```
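
For intuition on the code above: `legendre_bit` computes the Legendre symbol, which the custody game uses as a one-bit pseudo-random function. A minimal, self-contained sketch using Euler's criterion, with a small toy prime standing in for `CUSTODY_PRIME` (the helper name here is illustrative, not spec code):

```python
def legendre_bit_euler(a: int, q: int) -> int:
    """Return 1 if a is a nonzero quadratic residue mod the odd prime q, else 0."""
    if a % q == 0:
        return 0
    # Euler's criterion: a^((q-1)/2) == 1 mod q exactly for quadratic residues.
    return 1 if pow(a, (q - 1) // 2, q) == 1 else 0

if __name__ == "__main__":
    q = 101  # toy odd prime, stands in for CUSTODY_PRIME
    residues = {(x * x) % q for x in range(1, q)}
    for a in range(1, q):
        assert legendre_bit_euler(a, q) == (1 if a in residues else 0)
    print("Euler-criterion Legendre bit matches brute force for q=101")
```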
@@ -307,8 +312,10 @@ def get_custody_secrets(key: BLSSignature) -> Sequence[int]:
     full_G2_element = bls.signature_to_G2(key)
     signature = full_G2_element[0].coeffs
     signature_bytes = b"".join(x.to_bytes(48, "little") for x in signature)
-    secrets = [int.from_bytes(signature_bytes[i:i + BYTES_PER_CUSTODY_ATOM], "little")
-               for i in range(0, len(signature_bytes), 32)]
+    secrets = [
+        int.from_bytes(signature_bytes[i : i + BYTES_PER_CUSTODY_ATOM], "little")
+        for i in range(0, len(signature_bytes), 32)
+    ]
     return secrets
 ```

@@ -319,9 +326,10 @@ def universal_hash_function(data_chunks: Sequence[bytes], secrets: Sequence[int]
     n = len(data_chunks)
     return (
         sum(
-            secrets[i % CUSTODY_SECRETS]**i * int.from_bytes(atom, "little") % CUSTODY_PRIME
+            secrets[i % CUSTODY_SECRETS] ** i * int.from_bytes(atom, "little") % CUSTODY_PRIME
             for i, atom in enumerate(data_chunks)
-        ) + secrets[n % CUSTODY_SECRETS]**n
+        )
+        + secrets[n % CUSTODY_SECRETS] ** n
     ) % CUSTODY_PRIME
 ```
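
The universal hash above is a polynomial evaluation over the custody atoms: each atom is weighted by a power of a secret and everything is reduced mod `CUSTODY_PRIME`, so changing any single atom changes the output with high probability. A runnable toy evaluation (the constants `CUSTODY_SECRETS = 3` and `CUSTODY_PRIME = 2**13 - 1` below are made-up stand-ins, not spec values):

```python
CUSTODY_SECRETS = 3
CUSTODY_PRIME = 2**13 - 1  # 8191, a small Mersenne prime standing in for the real one

def universal_hash_function(data_chunks, secrets):
    n = len(data_chunks)
    return (
        sum(
            secrets[i % CUSTODY_SECRETS] ** i * int.from_bytes(atom, "little") % CUSTODY_PRIME
            for i, atom in enumerate(data_chunks)
        )
        + secrets[n % CUSTODY_SECRETS] ** n
    ) % CUSTODY_PRIME

print(universal_hash_function([b"aa", b"bb", b"cc"], secrets=[5, 7, 11]))
# Flipping one chunk moves the digest with high probability:
print(universal_hash_function([b"aa", b"bb", b"cd"], secrets=[5, 7, 11]))
```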
@@ -332,7 +340,10 @@ def compute_custody_bit(key: BLSSignature, data: ByteList) -> bit:
     custody_atoms = get_custody_atoms(data)
     secrets = get_custody_secrets(key)
     uhf = universal_hash_function(custody_atoms, secrets)
-    legendre_bits = [legendre_bit(uhf + secrets[0] + i, CUSTODY_PRIME) for i in range(CUSTODY_PROBABILITY_EXPONENT)]
+    legendre_bits = [
+        legendre_bit(uhf + secrets[0] + i, CUSTODY_PRIME)
+        for i in range(CUSTODY_PROBABILITY_EXPONENT)
+    ]
     return bit(all(legendre_bits))
 ```

@@ -340,7 +351,9 @@ def compute_custody_bit(key: BLSSignature, data: ByteList) -> bit:

 ```python
 def get_randao_epoch_for_custody_period(period: uint64, validator_index: ValidatorIndex) -> Epoch:
-    next_period_start = (period + 1) * EPOCHS_PER_CUSTODY_PERIOD - validator_index % EPOCHS_PER_CUSTODY_PERIOD
+    next_period_start = (
+        period + 1
+    ) * EPOCHS_PER_CUSTODY_PERIOD - validator_index % EPOCHS_PER_CUSTODY_PERIOD
     return Epoch(next_period_start + CUSTODY_PERIOD_TO_RANDAO_PADDING)
 ```

@@ -348,9 +361,9 @@ def get_randao_epoch_for_custody_period(period: uint64, validator_index: Validat

 ```python
 def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epoch) -> uint64:
-    '''
+    """
     Return the reveal period for a given validator.
-    '''
+    """
     return (epoch + validator_index % EPOCHS_PER_CUSTODY_PERIOD) // EPOCHS_PER_CUSTODY_PERIOD
 ```
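
The offset `validator_index % EPOCHS_PER_CUSTODY_PERIOD` staggers custody-period boundaries per validator so that reveals do not all fall on the same epoch. A quick standalone illustration (toy period length, not the spec constant):

```python
EPOCHS_PER_CUSTODY_PERIOD = 4  # illustrative value only

def get_custody_period_for_validator(validator_index: int, epoch: int) -> int:
    return (epoch + validator_index % EPOCHS_PER_CUSTODY_PERIOD) // EPOCHS_PER_CUSTODY_PERIOD

for validator_index in range(4):
    periods = [get_custody_period_for_validator(validator_index, e) for e in range(8)]
    print(validator_index, periods)
# Validator 0 rolls over at epochs 4, 8, ...; validator 1 at 3, 7, ...; and so on,
# spreading reveal deadlines evenly across epochs.
```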
@@ -388,9 +401,13 @@ def process_custody_game_operations(state: BeaconState, body: BeaconBlockBody) -

 ```python
 def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge) -> None:
     # Verify the attestation
-    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, challenge.attestation))
+    assert is_valid_indexed_attestation(
+        state, get_indexed_attestation(state, challenge.attestation)
+    )
     # Verify it is not too late to challenge the attestation
-    max_attestation_challenge_epoch = Epoch(challenge.attestation.data.target.epoch + MAX_CHUNK_CHALLENGE_DELAY)
+    max_attestation_challenge_epoch = Epoch(
+        challenge.attestation.data.target.epoch + MAX_CHUNK_CHALLENGE_DELAY
+    )
     assert get_current_epoch(state) <= max_attestation_challenge_epoch
     # Verify it is not too late to challenge the responder
     responder = state.validators[challenge.responder_index]

@@ -402,17 +419,19 @@ def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge
     attesters = get_attesting_indices(state, challenge)
     assert challenge.responder_index in attesters
     # Verify shard transition is correctly given
-    assert hash_tree_root(challenge.shard_transition) == challenge.attestation.data.shard_transition_root
+    assert (
+        hash_tree_root(challenge.shard_transition)
+        == challenge.attestation.data.shard_transition_root
+    )
     data_root = challenge.shard_transition.shard_data_roots[challenge.data_index]
     # Verify the challenge is not a duplicate
     for record in state.custody_chunk_challenge_records:
-        assert (
-            record.data_root != data_root or
-            record.chunk_index != challenge.chunk_index
-        )
+        assert record.data_root != data_root or record.chunk_index != challenge.chunk_index
     # Verify depth
     shard_block_length = challenge.shard_transition.shard_block_lengths[challenge.data_index]
-    transition_chunks = (shard_block_length + BYTES_PER_CUSTODY_CHUNK - 1) // BYTES_PER_CUSTODY_CHUNK
+    transition_chunks = (
+        shard_block_length + BYTES_PER_CUSTODY_CHUNK - 1
+    ) // BYTES_PER_CUSTODY_CHUNK
     assert challenge.chunk_index < transition_chunks
     # Add new chunk challenge record
     new_record = CustodyChunkChallengeRecord(

@@ -433,11 +452,11 @@ def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge
 #### Custody chunk response

 ```python
-def process_chunk_challenge_response(state: BeaconState,
-                                     response: CustodyChunkResponse) -> None:
+def process_chunk_challenge_response(state: BeaconState, response: CustodyChunkResponse) -> None:
     # Get matching challenge (if any) from records
     matching_challenges = [
-        record for record in state.custody_chunk_challenge_records
+        record
+        for record in state.custody_chunk_challenge_records
         if record.challenge_index == response.challenge_index
     ]
     assert len(matching_challenges) == 1

@@ -457,7 +476,9 @@ def process_chunk_challenge_response(state: BeaconState,
     state.custody_chunk_challenge_records[index_in_records] = CustodyChunkChallengeRecord()
     # Reward the proposer
     proposer_index = get_beacon_proposer_index(state)
-    increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT))
+    increase_balance(
+        state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT)
+    )
 ```
@@ -469,9 +490,13 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) ->
     Note that this function mutates ``state``.
     """
     revealer = state.validators[reveal.revealer_index]
-    epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_secret_to_reveal, reveal.revealer_index)
+    epoch_to_sign = get_randao_epoch_for_custody_period(
+        revealer.next_custody_secret_to_reveal, reveal.revealer_index
+    )

-    custody_reveal_period = get_custody_period_for_validator(reveal.revealer_index, get_current_epoch(state))
+    custody_reveal_period = get_custody_period_for_validator(
+        reveal.revealer_index, get_current_epoch(state)
+    )
     # Only past custody periods can be revealed, except after exiting the exit period can be revealed
     is_past_reveal = revealer.next_custody_secret_to_reveal < custody_reveal_period
     is_exited = revealer.exit_epoch <= get_current_epoch(state)

@@ -499,14 +524,16 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) ->
     increase_balance(
         state,
         proposer_index,
-        Gwei(get_base_reward(state, reveal.revealer_index) // MINOR_REWARD_QUOTIENT)
+        Gwei(get_base_reward(state, reveal.revealer_index) // MINOR_REWARD_QUOTIENT),
     )
 ```

 #### Early derived secret reveals

 ```python
-def process_early_derived_secret_reveal(state: BeaconState, reveal: EarlyDerivedSecretReveal) -> None:
+def process_early_derived_secret_reveal(
+    state: BeaconState, reveal: EarlyDerivedSecretReveal
+) -> None:
     """
     Process ``EarlyDerivedSecretReveal`` operation.
     Note that this function mutates ``state``.

@@ -524,7 +551,9 @@ def process_early_derived_secret_reveal(state: BeaconState, reveal: EarlyDerived
     pubkeys = [revealed_validator.pubkey, masker.pubkey]

     domain = get_domain(state, DOMAIN_RANDAO, reveal.epoch)
-    signing_roots = [compute_signing_root(root, domain) for root in [hash_tree_root(reveal.epoch), reveal.mask]]
+    signing_roots = [
+        compute_signing_root(root, domain) for root in [hash_tree_root(reveal.epoch), reveal.mask]
+    ]
     assert bls.AggregateVerify(pubkeys, signing_roots, reveal.reveal)

     if reveal.epoch >= get_current_epoch(state) + CUSTODY_PERIOD_TO_RANDAO_PADDING:

@@ -565,7 +594,9 @@ def process_early_derived_secret_reveal(state: BeaconState, reveal: EarlyDerived
 #### Custody Slashings

 ```python
-def process_custody_slashing(state: BeaconState, signed_custody_slashing: SignedCustodySlashing) -> None:
+def process_custody_slashing(
+    state: BeaconState, signed_custody_slashing: SignedCustodySlashing
+) -> None:
     custody_slashing = signed_custody_slashing.message
     attestation = custody_slashing.attestation

@@ -589,15 +620,23 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
     shard_transition = custody_slashing.shard_transition
     assert hash_tree_root(shard_transition) == attestation.data.shard_transition_root
     # Verify that the provided data matches the shard-transition
-    assert len(custody_slashing.data) == shard_transition.shard_block_lengths[custody_slashing.data_index]
-    assert hash_tree_root(custody_slashing.data) == shard_transition.shard_data_roots[custody_slashing.data_index]
+    assert (
+        len(custody_slashing.data)
+        == shard_transition.shard_block_lengths[custody_slashing.data_index]
+    )
+    assert (
+        hash_tree_root(custody_slashing.data)
+        == shard_transition.shard_data_roots[custody_slashing.data_index]
+    )
     # Verify existence and participation of claimed malefactor
     attesters = get_attesting_indices(state, attestation)
     assert custody_slashing.malefactor_index in attesters

     # Verify the malefactor custody key
     epoch_to_sign = get_randao_epoch_for_custody_period(
-        get_custody_period_for_validator(custody_slashing.malefactor_index, attestation.data.target.epoch),
+        get_custody_period_for_validator(
+            custody_slashing.malefactor_index, attestation.data.target.epoch
+        ),
         custody_slashing.malefactor_index,
     )
     domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign)

@@ -605,7 +644,9 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
     assert bls.Verify(malefactor.pubkey, signing_root, custody_slashing.malefactor_secret)

     # Compute the custody bit
-    computed_custody_bit = compute_custody_bit(custody_slashing.malefactor_secret, custody_slashing.data)
+    computed_custody_bit = compute_custody_bit(
+        custody_slashing.malefactor_secret, custody_slashing.data
+    )

     # Verify the claim
     if computed_custody_bit == 1:

@@ -613,7 +654,9 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
         slash_validator(state, custody_slashing.malefactor_index)
         committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
         others_count = len(committee) - 1
-        whistleblower_reward = Gwei(malefactor.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT // others_count)
+        whistleblower_reward = Gwei(
+            malefactor.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT // others_count
+        )
         for attester_index in attesters:
             if attester_index != custody_slashing.malefactor_index:
                 increase_balance(state, attester_index, whistleblower_reward)

@@ -674,8 +717,15 @@ def process_reveal_deadlines(state: BeaconState) -> None:
 ```python
 def process_challenge_deadlines(state: BeaconState) -> None:
     for custody_chunk_challenge in state.custody_chunk_challenge_records:
-        if get_current_epoch(state) > custody_chunk_challenge.inclusion_epoch + EPOCHS_PER_CUSTODY_PERIOD:
-            slash_validator(state, custody_chunk_challenge.responder_index, custody_chunk_challenge.challenger_index)
+        if (
+            get_current_epoch(state)
+            > custody_chunk_challenge.inclusion_epoch + EPOCHS_PER_CUSTODY_PERIOD
+        ):
+            slash_validator(
+                state,
+                custody_chunk_challenge.responder_index,
+                custody_chunk_challenge.challenger_index,
+            )
             index_in_records = state.custody_chunk_challenge_records.index(custody_chunk_challenge)
             state.custody_chunk_challenge_records[index_in_records] = CustodyChunkChallengeRecord()
 ```
@@ -685,21 +735,32 @@ def process_challenge_deadlines(state: BeaconState) -> None:
 ```python
 def process_custody_final_updates(state: BeaconState) -> None:
     # Clean up exposed RANDAO key reveals
-    state.exposed_derived_secrets[get_current_epoch(state) % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = []
+    state.exposed_derived_secrets[
+        get_current_epoch(state) % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
+    ] = []

     # Reset withdrawable epochs if challenge records are empty
     records = state.custody_chunk_challenge_records
-    validator_indices_in_records = set(record.responder_index for record in records)  # non-duplicate
+    validator_indices_in_records = set(
+        record.responder_index for record in records
+    )  # non-duplicate
     for index, validator in enumerate(state.validators):
         if validator.exit_epoch != FAR_FUTURE_EPOCH:
-            not_all_secrets_are_revealed = validator.all_custody_secrets_revealed_epoch == FAR_FUTURE_EPOCH
-            if ValidatorIndex(index) in validator_indices_in_records or not_all_secrets_are_revealed:
+            not_all_secrets_are_revealed = (
+                validator.all_custody_secrets_revealed_epoch == FAR_FUTURE_EPOCH
+            )
+            if (
+                ValidatorIndex(index) in validator_indices_in_records
+                or not_all_secrets_are_revealed
+            ):
                 # Delay withdrawable epochs if challenge records are not empty or not all
                 # custody secrets revealed
                 validator.withdrawable_epoch = FAR_FUTURE_EPOCH
             else:
                 # Reset withdrawable epochs if challenge records are empty and all secrets are revealed
                 if validator.withdrawable_epoch == FAR_FUTURE_EPOCH:
-                    validator.withdrawable_epoch = Epoch(validator.all_custody_secrets_revealed_epoch
-                                                         + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
+                    validator.withdrawable_epoch = Epoch(
+                        validator.all_custody_secrets_revealed_epoch
+                        + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+                    )
 ```
@@ -92,10 +92,9 @@ for which the custody bit is one. The custody bit is computed using the custody
 secret:

 ```python
-def get_custody_secret(state: BeaconState,
-                       validator_index: ValidatorIndex,
-                       privkey: int,
-                       epoch: Epoch=None) -> BLSSignature:
+def get_custody_secret(
+    state: BeaconState, validator_index: ValidatorIndex, privkey: int, epoch: Epoch = None
+) -> BLSSignature:
     if epoch is None:
         epoch = get_current_epoch(state)
     period = get_custody_period_for_validator(validator_index, epoch)
@@ -60,7 +60,7 @@ def reverse_bit_order(n: int, order: int):
     Reverse the bit order of an integer n
     """
     assert is_power_of_two(order)
-    return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2)
+    return int(("{:0" + str(order.bit_length() - 1) + "b}").format(n)[::-1], 2)
 ```
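
A worked example of the bit-reversal permutation above: with `order = 8`, indices use 3 bits, so 1 = 0b001 maps to 0b100 = 4 and 3 = 0b011 maps to 0b110 = 6. A self-contained version:

```python
def reverse_bit_order(n: int, order: int) -> int:
    assert order & (order - 1) == 0  # order must be a power of two
    return int(("{:0" + str(order.bit_length() - 1) + "b}").format(n)[::-1], 2)

print([reverse_bit_order(i, 8) for i in range(8)])  # [0, 4, 2, 6, 1, 5, 3, 7]
```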

 #### `reverse_bit_order_list`

@@ -86,7 +86,7 @@ def das_fft_extension(data: Sequence[Point]) -> Sequence[Point]:
     such that the second output half of the IFFT is all zeroes.
     """
     poly = inverse_fft(data)
-    return fft(poly + [0]*len(poly))[1::2]
+    return fft(poly + [0] * len(poly))[1::2]
 ```
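
A sketch of the extension property, under the assumption that a naive DFT over the small field GF(17) behaves like the spec's `fft`/`inverse_fft` over the BLS scalar field (4 is a primitive 4th root of unity mod 17; all sizes here are toy stand-ins):

```python
P = 17

def dft(vals, root):
    n = len(vals)
    return [sum(v * pow(root, i * j, P) for j, v in enumerate(vals)) % P for i in range(n)]

def idft(vals, root):
    n = len(vals)
    inv_n = pow(n, P - 2, P)  # modular inverse of n via Fermat's little theorem
    return [v * inv_n % P for v in dft(vals, pow(root, P - 2, P))]

data = [5, 7]                        # original evaluations (degree < 2)
poly = idft(data, pow(4, 2, P))      # coefficients, over the 2nd-roots domain {1, 16}
evals = dft(poly + [0] * 2, 4)       # evaluate zero-padded poly over the 4th roots
extension = evals[1::2]              # das_fft_extension output: odd positions
assert evals[0::2] == data           # even positions reproduce the input
assert idft(evals, 4)[2:] == [0, 0]  # second output half of the IFFT is all zeroes
print(extension)                     # [2, 10]
```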

 ### Data recovery

@@ -119,11 +119,13 @@ def extend_data(data: Sequence[Point]) -> Sequence[Point]:

 ```python
 def unextend_data(extended_data: Sequence[Point]) -> Sequence[Point]:
-    return extended_data[:len(extended_data)//2]
+    return extended_data[: len(extended_data) // 2]
 ```

 ```python
-def check_multi_kzg_proof(commitment: BLSCommitment, proof: BLSCommitment, x: Point, ys: Sequence[Point]) -> bool:
+def check_multi_kzg_proof(
+    commitment: BLSCommitment, proof: BLSCommitment, x: Point, ys: Sequence[Point]
+) -> bool:
     """
     Run a KZG multi-proof check to verify that for the subgroup starting at x,
     the proof indeed complements the ys to match the commitment.

@@ -137,12 +139,12 @@ def construct_proofs(extended_data_as_poly: Sequence[Point]) -> Sequence[BLSComm
     Constructs proofs for samples of extended data (in polynomial form, 2nd half being zeroes).
     Use the FK20 multi-proof approach to construct proofs for a chunk length of POINTS_PER_SAMPLE.
     """
-    ... # Omitted for now, refer to KZG implementation resources.
+    ...  # Omitted for now, refer to KZG implementation resources.
 ```

 ```python
 def commit_to_data(data_as_poly: Sequence[Point]) -> BLSCommitment:
-    """Commit to a polynomial by """
+    """Commit to a polynomial by"""
 ```

 ```python
@@ -151,7 +153,7 @@ def sample_data(slot: Slot, shard: Shard, extended_data: Sequence[Point]) -> Seq
     assert sample_count <= MAX_SAMPLES_PER_BLOCK
     # get polynomial form of full extended data, second half will be all zeroes.
     poly = ifft(reverse_bit_order_list(extended_data))
-    assert all(v == 0 for v in poly[len(poly)//2:])
+    assert all(v == 0 for v in poly[len(poly) // 2 :])
     proofs = construct_proofs(poly)
     return [
         DASSample(

@@ -163,15 +165,17 @@ def sample_data(slot: Slot, shard: Shard, extended_data: Sequence[Point]) -> Seq
             proof=proofs[reverse_bit_order(i, sample_count)],
             # note: we leave the sample data as-is so it matches the original nicely.
             # The proof applies to `ys = reverse_bit_order_list(sample.data)`
-            data=extended_data[i*POINTS_PER_SAMPLE:(i+1)*POINTS_PER_SAMPLE]
-        ) for i in range(sample_count)
+            data=extended_data[i * POINTS_PER_SAMPLE : (i + 1) * POINTS_PER_SAMPLE],
+        )
+        for i in range(sample_count)
     ]
 ```

 ```python
 def verify_sample(sample: DASSample, sample_count: uint64, commitment: BLSCommitment):
     domain_pos = reverse_bit_order(sample.index, sample_count)
-    sample_root_of_unity = ROOT_OF_UNITY**MAX_SAMPLES_PER_BLOCK  # change point-level to sample-level domain
+    # Change point-level to sample-level domain
+    sample_root_of_unity = ROOT_OF_UNITY**MAX_SAMPLES_PER_BLOCK
     x = sample_root_of_unity**domain_pos
     ys = reverse_bit_order_list(sample.data)
     assert check_multi_kzg_proof(commitment, sample.proof, x, ys)

@@ -180,6 +184,8 @@ def verify_sample(sample: DASSample, sample_count: uint64, commitment: BLSCommit
 ```python
 def reconstruct_extended_data(samples: Sequence[Optional[DASSample]]) -> Sequence[Point]:
     # Instead of recovering with a point-by-point approach, recover the samples by recovering missing subgroups.
-    subgroups = [None if sample is None else reverse_bit_order_list(sample.data) for sample in samples]
+    subgroups = [
+        None if sample is None else reverse_bit_order_list(sample.data) for sample in samples
+    ]
     return recover_data(subgroups)
 ```

@@ -25,11 +25,18 @@ and a length.
 def get_new_dependencies(state: BeaconState) -> Set[DataCommitment]:
     return set(
         # Already confirmed during this epoch
-        [c.commitment for c in state.current_epoch_pending_headers if c.confirmed] +
+        [c.commitment for c in state.current_epoch_pending_headers if c.confirmed]
+        +
         # Already confirmed during previous epoch
-        [c.commitment for c in state.previous_epoch_pending_headers if c.confirmed] +
+        [c.commitment for c in state.previous_epoch_pending_headers if c.confirmed]
+        +
         # Confirmed in the epoch before the previous
-        [c for c in shard for shard in state.grandparent_epoch_confirmed_commitments if c != DataCommitment()]
+        [
+            c
+            for c in shard
+            for shard in state.grandparent_epoch_confirmed_commitments
+            if c != DataCommitment()
+        ]
     )
 ```

@@ -122,13 +122,15 @@ class BuilderBlockBid(Container):

     execution_payload_root: Root

-    sharded_data_commitment_root: Root  # Root of the sharded data (only data, not beacon/builder block commitments)
+    sharded_data_commitment_root: (
+        Root  # Root of the sharded data (only data, not beacon/builder block commitments)
+    )

-    sharded_data_commitment_count: uint64 # Count of sharded data commitments
+    sharded_data_commitment_count: uint64  # Count of sharded data commitments

-    bid: Gwei # Block builder bid paid to proposer
+    bid: Gwei  # Block builder bid paid to proposer

-    validator_index: ValidatorIndex # Validator index for this bid
+    validator_index: ValidatorIndex  # Validator index for this bid

     # Block builders use an Eth1 address -- need signature as
     # block bid and data gas base fees will be charged to this address

@@ -142,7 +144,7 @@ class BuilderBlockBid(Container):
 ```python
 class BuilderBlockBidWithRecipientAddress(Container):
     builder_block_bid: Union[None, BuilderBlockBid]
-    recipient_address: ExecutionAddress # Address to receive the block builder bid
+    recipient_address: ExecutionAddress  # Address to receive the block builder bid
 ```

 #### `ShardedCommitmentsContainer`

@@ -284,18 +286,31 @@ def verify_builder_block_bid(state: BeaconState, block: BeaconBlock) -> None:
     if is_builder_block_slot(block.slot):
         # Get last builder block bid
         assert state.blocks_since_builder_block[-1].body.payload_data.selector == 0
-        builder_block_bid = state.blocks_since_builder_block[-1].body.payload_data.value.builder_block_bid
+        builder_block_bid = state.blocks_since_builder_block[
+            -1
+        ].body.payload_data.value.builder_block_bid
         assert builder_block_bid.slot + 1 == block.slot

-        assert block.body.payload_data.selector == 1  # Verify that builder block does not contain bid
+        assert (
+            block.body.payload_data.selector == 1
+        )  # Verify that builder block does not contain bid

         builder_block_data = block.body.payload_data.value

-        assert builder_block_bid.execution_payload_root == hash_tree_root(builder_block_data.execution_payload)
+        assert builder_block_bid.execution_payload_root == hash_tree_root(
+            builder_block_data.execution_payload
+        )

-        assert builder_block_bid.sharded_data_commitment_count == builder_block_data.included_sharded_data_commitments
+        assert (
+            builder_block_bid.sharded_data_commitment_count
+            == builder_block_data.included_sharded_data_commitments
+        )

-        assert builder_block_bid.sharded_data_commitment_root == hash_tree_root(builder_block_data.sharded_commitments[-builder_block_bid.included_sharded_data_commitments:])
+        assert builder_block_bid.sharded_data_commitment_root == hash_tree_root(
+            builder_block_data.sharded_commitments[
+                -builder_block_bid.included_sharded_data_commitments :
+            ]
+        )

         assert builder_block_bid.validator_index == block.proposer_index

@@ -324,47 +339,70 @@ def process_sharded_data(state: BeaconState, block: BeaconBlock) -> None:
     sharded_commitments_container = block.body.payload_data.value.sharded_commitments_container

     # Verify not too many commitments
-    assert len(sharded_commitments_container.sharded_commitments) // 2 <= get_active_shard_count(state, get_current_epoch(state))
+    assert len(
+        sharded_commitments_container.sharded_commitments
+    ) // 2 <= get_active_shard_count(state, get_current_epoch(state))

     # Verify the degree proof
     r = hash_to_bls_field(sharded_commitments_container.sharded_commitments, 0)
     r_powers = compute_powers(r, len(sharded_commitments_container.sharded_commitments))
-    combined_commitment = elliptic_curve_lincomb(sharded_commitments_container.sharded_commitments, r_powers)
+    combined_commitment = elliptic_curve_lincomb(
+        sharded_commitments_container.sharded_commitments, r_powers
+    )

     payload_field_elements_per_blob = SAMPLES_PER_BLOB * FIELD_ELEMENTS_PER_SAMPLE // 2

-    verify_degree_proof(combined_commitment, payload_field_elements_per_blob, sharded_commitments_container.degree_proof)
+    verify_degree_proof(
+        combined_commitment,
+        payload_field_elements_per_blob,
+        sharded_commitments_container.degree_proof,
+    )

     # Verify that the 2*N commitments lie on a degree < N polynomial
     low_degree_check(sharded_commitments_container.sharded_commitments)

     # Verify that blocks since the last builder block have been included
-    blocks_chunked = [bytes_to_field_elements(ssz_serialize(block)) for block in state.blocks_since_builder_block]
+    blocks_chunked = [
+        bytes_to_field_elements(ssz_serialize(block))
+        for block in state.blocks_since_builder_block
+    ]
     block_vectors = []

     for block_chunked in blocks_chunked:
         for i in range(0, len(block_chunked), payload_field_elements_per_blob):
-            block_vectors.append(block_chunked[i:i + payload_field_elements_per_blob])
+            block_vectors.append(block_chunked[i : i + payload_field_elements_per_blob])

     number_of_blobs = len(block_vectors)
-    r = hash_to_bls_field(sharded_commitments_container.sharded_commitments[:number_of_blobs], 0)
-    x = hash_to_bls_field(sharded_commitments_container.sharded_commitments[:number_of_blobs], 1)
+    r = hash_to_bls_field(
+        sharded_commitments_container.sharded_commitments[:number_of_blobs], 0
+    )
+    x = hash_to_bls_field(
+        sharded_commitments_container.sharded_commitments[:number_of_blobs], 1
+    )

     r_powers = compute_powers(r, number_of_blobs)
     combined_vector = vector_lincomb(block_vectors, r_powers)
-    combined_commitment = elliptic_curve_lincomb(sharded_commitments_container.sharded_commitments[:number_of_blobs], r_powers)
+    combined_commitment = elliptic_curve_lincomb(
+        sharded_commitments_container.sharded_commitments[:number_of_blobs], r_powers
+    )
     y = evaluate_polynomial_in_evaluation_form(combined_vector, x)

-    verify_kzg_proof(combined_commitment, x, y, sharded_commitments_container.block_verification_kzg_proof)
+    verify_kzg_proof(
+        combined_commitment, x, y, sharded_commitments_container.block_verification_kzg_proof
+    )

     # Verify that number of sharded data commitments is correctly indicated
-    assert 2 * (number_of_blobs + included_sharded_data_commitments) == len(sharded_commitments_container.sharded_commitments)
+    assert 2 * (number_of_blobs + included_sharded_data_commitments) == len(
+        sharded_commitments_container.sharded_commitments
+    )
 ```

 #### Execution payload

 ```python
-def process_execution_payload(state: BeaconState, block: BeaconBlock, execution_engine: ExecutionEngine) -> None:
+def process_execution_payload(
+    state: BeaconState, block: BeaconBlock, execution_engine: ExecutionEngine
+) -> None:
     if is_builder_block_slot(block.slot):
         assert block.body.payload_data.selector == 1
         payload = block.body.payload_data.value.execution_payload

@@ -378,21 +416,27 @@ def process_execution_payload(state: BeaconState, block: BeaconBlock, execution_

     # Get sharded data commitments
     sharded_commitments_container = block.body.sharded_commitments_container
-    sharded_data_commitments = sharded_commitments_container.sharded_commitments[-sharded_commitments_container.included_sharded_data_commitments:]
+    sharded_data_commitments = sharded_commitments_container.sharded_commitments[
+        -sharded_commitments_container.included_sharded_data_commitments :
+    ]

     # Get all unprocessed builder block bids
     unprocessed_builder_block_bid_with_recipient_addresses = []
     for block in state.blocks_since_builder_block[1:]:
-        unprocessed_builder_block_bid_with_recipient_addresses.append(block.body.builder_block_bid_with_recipient_address.value)
+        unprocessed_builder_block_bid_with_recipient_addresses.append(
+            block.body.builder_block_bid_with_recipient_address.value
+        )

     # Verify the execution payload is valid
     # The execution engine gets two extra payloads: One for the sharded data commitments (these are needed to verify type 3 transactions)
     # and one for all so far unprocessed builder block bids:
     # * The execution engine needs to transfer the balance from the bidder to the proposer.
     # * The execution engine needs to deduct data gas fees from the bidder balances
-    assert execution_engine.execute_payload(payload,
-                                            sharded_data_commitments,
-                                            unprocessed_builder_block_bid_with_recipient_addresses)
+    assert execution_engine.execute_payload(
+        payload,
+        sharded_data_commitments,
+        unprocessed_builder_block_bid_with_recipient_addresses,
+    )

     # Cache execution payload header
     state.latest_execution_payload_header = ExecutionPayloadHeader(

@@ -93,7 +93,7 @@ def reverse_bit_order(n: int, order: int) -> int:
     """
     assert is_power_of_two(order)
     # Convert n to binary with the same number of bits as "order" - 1, then reverse its bit order
-    return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2)
+    return int(("{:0" + str(order.bit_length() - 1) + "b}").format(n)[::-1], 2)
 ```

 #### `list_to_reverse_bit_order`

@@ -182,7 +182,7 @@ def low_degree_check(commitments: List[KZGCommitment]):
     # For an efficient implementation, B and Bprime should be precomputed
     def B(z):
         r = 1
-        for w in roots[:d + 1]:
+        for w in roots[: d + 1]:
             r = r * (z - w) % BLS_MODULUS
         return r

@@ -190,14 +190,18 @@ def low_degree_check(commitments: List[KZGCommitment]):
         r = 0
         for i in range(d + 1):
            m = 1
-           for w in roots[:i] + roots[i + 1:d + 1]:
+           for w in roots[:i] + roots[i + 1 : d + 1]:
               m = m * (z - w) % BLS_MODULUS
           r = (r + m) % BLS_MODULUS
        return r

     coefs = []
     for i in range(K):
-        coefs.append( - (r_to_K - 1) * bls_modular_inverse(K * roots[i * (K - 1) % K] * (r - roots[i])) % BLS_MODULUS)
+        coefs.append(
+            -(r_to_K - 1)
+            * bls_modular_inverse(K * roots[i * (K - 1) % K] * (r - roots[i]))
+            % BLS_MODULUS
+        )
     for i in range(d + 1):
         coefs[i] = (coefs[i] + B(r) * bls_modular_inverse(Bprime(r) * (r - roots[i]))) % BLS_MODULUS

@@ -207,11 +211,13 @@ def low_degree_check(commitments: List[KZGCommitment]):
 #### `vector_lincomb`

 ```python
-def vector_lincomb(vectors: List[List[BLSFieldElement]], scalars: List[BLSFieldElement]) -> List[BLSFieldElement]:
+def vector_lincomb(
+    vectors: List[List[BLSFieldElement]], scalars: List[BLSFieldElement]
+) -> List[BLSFieldElement]:
     """
     Compute a linear combination of field element vectors.
     """
-    r = [0]*len(vectors[0])
+    r = [0] * len(vectors[0])
     for v, a in zip(vectors, scalars):
         for i, x in enumerate(v):
             r[i] = (r[i] + a * x) % BLS_MODULUS
@@ -225,7 +231,7 @@ def bytes_to_field_elements(block: bytes) -> List[BLSFieldElement]:
     """
     Slices a block into 31-byte chunks that can fit into field elements.
     """
-    sliced_block = [block[i:i + 31] for i in range(0, len(bytes), 31)]
+    sliced_block = [block[i : i + 31] for i in range(0, len(bytes), 31)]
     return [BLSFieldElement(int.from_bytes(x, "little")) for x in sliced_block]
 ```
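
One aside on the function above: the comprehension ranges over `len(bytes)`, the builtin type, which would raise a TypeError at runtime; the intent is presumably `len(block)`, and the linter commit only reformats the line. A self-contained version with that reading, using plain ints as stand-ins for `BLSFieldElement`:

```python
def bytes_to_field_elements(block: bytes) -> list[int]:
    """Slice a block into 31-byte chunks, little-endian-decoded to integers."""
    sliced_block = [block[i : i + 31] for i in range(0, len(block), 31)]
    return [int.from_bytes(x, "little") for x in sliced_block]

elements = bytes_to_field_elements(b"\x01" * 40)  # 40 bytes -> chunks of 31 and 9
assert len(elements) == 2
assert all(e < 2**248 for e in elements)  # 31 bytes always fit below the BLS modulus
print(elements)
```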

@@ -234,7 +240,9 @@ def bytes_to_field_elements(block: bytes) -> List[BLSFieldElement]:
 #### `add_polynomials`

 ```python
-def add_polynomials(a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoefficients) -> BLSPolynomialByCoefficients:
+def add_polynomials(
+    a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoefficients
+) -> BLSPolynomialByCoefficients:
     """
     Sum the polynomials ``a`` and ``b`` given by their coefficients.
     """

@@ -245,7 +253,9 @@ def add_polynomials(a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoefficien
 #### `multiply_polynomials`

 ```python
-def multiply_polynomials(a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoefficients) -> BLSPolynomialByCoefficients:
+def multiply_polynomials(
+    a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoefficients
+) -> BLSPolynomialByCoefficients:
     """
     Multiplies the polynomials `a` and `b` given by their coefficients
     """

@@ -259,7 +269,9 @@ def multiply_polynomials(a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoeff
 #### `interpolate_polynomial`

 ```python
-def interpolate_polynomial(xs: List[BLSFieldElement], ys: List[BLSFieldElement]) -> BLSPolynomialByCoefficients:
+def interpolate_polynomial(
+    xs: List[BLSFieldElement], ys: List[BLSFieldElement]
+) -> BLSPolynomialByCoefficients:
     """
     Lagrange interpolation
     """

@@ -282,7 +294,9 @@ def interpolate_polynomial(xs: List[BLSFieldElement], ys: List[BLSFieldElement])
 #### `evaluate_polynomial_in_evaluation_form`

 ```python
-def evaluate_polynomial_in_evaluation_form(poly: BLSPolynomialByEvaluations, x: BLSFieldElement) -> BLSFieldElement:
+def evaluate_polynomial_in_evaluation_form(
+    poly: BLSPolynomialByEvaluations, x: BLSFieldElement
+) -> BLSFieldElement:
     """
     Evaluates a polynomial (in evaluation form) at an arbitrary point
     """

@@ -316,7 +330,9 @@ Goldberg, 2010: https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf).
 #### `elliptic_curve_lincomb`

 ```python
-def elliptic_curve_lincomb(points: List[KZGCommitment], scalars: List[BLSFieldElement]) -> KZGCommitment:
+def elliptic_curve_lincomb(
+    points: List[KZGCommitment], scalars: List[BLSFieldElement]
+) -> KZGCommitment:
     """
     BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants.
     This is a non-optimized implementation.

@@ -337,9 +353,10 @@ def hash_to_bls_field(x: Container, challenge_number: uint64) -> BLSFieldElement
     This function is used to generate Fiat-Shamir challenges. The output is not uniform over the BLS field.
     """
     return (
-        (int.from_bytes(hash(hash_tree_root(x) + int.to_bytes(challenge_number, 32, "little")), "little"))
-        % BLS_MODULUS
-    )
+        int.from_bytes(
+            hash(hash_tree_root(x) + int.to_bytes(challenge_number, 32, "little")), "little"
+        )
+    ) % BLS_MODULUS
 ```
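
The pattern above is the standard Fiat-Shamir trick: hash the committed data together with a challenge counter to derive field-element challenges deterministically. A minimal sketch with `sha256` standing in for the spec's `hash` and a 32-byte digest standing in for `hash_tree_root(x)` (the function name here is illustrative):

```python
from hashlib import sha256

# BLS12-381 scalar field modulus
BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513

def hash_to_bls_field_sketch(x_root: bytes, challenge_number: int) -> int:
    digest = sha256(x_root + challenge_number.to_bytes(32, "little")).digest()
    return int.from_bytes(digest, "little") % BLS_MODULUS

x_root = sha256(b"some container").digest()
r = hash_to_bls_field_sketch(x_root, 0)
x = hash_to_bls_field_sketch(x_root, 1)
assert r != x  # distinct challenge numbers yield independent challenges
print(r, x)
```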

 ### KZG operations

@@ -347,35 +364,39 @@ def hash_to_bls_field(x: Container, challenge_number: uint64) -> BLSFieldElement
 #### `verify_kzg_proof`

 ```python
-def verify_kzg_proof(commitment: KZGCommitment, x: BLSFieldElement, y: BLSFieldElement, proof: KZGCommitment) -> None:
+def verify_kzg_proof(
+    commitment: KZGCommitment, x: BLSFieldElement, y: BLSFieldElement, proof: KZGCommitment
+) -> None:
     """
     Check that `proof` is a valid KZG proof for the polynomial committed to by `commitment` evaluated
     at `x` equals `y`.
     """
     zero_poly = G2_SETUP[1].add(G2_SETUP[0].mult(x).neg())

-    assert (
-        bls.Pairing(proof, zero_poly)
-        == bls.Pairing(commitment.add(G1_SETUP[0].mult(y).neg), G2_SETUP[0])
+    assert bls.Pairing(proof, zero_poly) == bls.Pairing(
+        commitment.add(G1_SETUP[0].mult(y).neg), G2_SETUP[0]
     )
 ```

 #### `verify_kzg_multiproof`

 ```python
-def verify_kzg_multiproof(commitment: KZGCommitment,
-                          xs: List[BLSFieldElement],
-                          ys: List[BLSFieldElement],
-                          proof: KZGCommitment) -> None:
+def verify_kzg_multiproof(
+    commitment: KZGCommitment,
+    xs: List[BLSFieldElement],
+    ys: List[BLSFieldElement],
+    proof: KZGCommitment,
+) -> None:
     """
     Verify a KZG multiproof.
     """
-    zero_poly = elliptic_curve_lincomb(G2_SETUP[:len(xs)], interpolate_polynomial(xs, [0] * len(ys)))
-    interpolated_poly = elliptic_curve_lincomb(G2_SETUP[:len(xs)], interpolate_polynomial(xs, ys))
+    zero_poly = elliptic_curve_lincomb(
+        G2_SETUP[: len(xs)], interpolate_polynomial(xs, [0] * len(ys))
+    )
+    interpolated_poly = elliptic_curve_lincomb(G2_SETUP[: len(xs)], interpolate_polynomial(xs, ys))

-    assert (
-        bls.Pairing(proof, zero_poly)
-        == bls.Pairing(commitment.add(interpolated_poly.neg()), G2_SETUP[0])
+    assert bls.Pairing(proof, zero_poly) == bls.Pairing(
+        commitment.add(interpolated_poly.neg()), G2_SETUP[0]
     )
 ```

@@ -387,8 +408,5 @@ def verify_degree_proof(commitment: KZGCommitment, degree_bound: uint64, proof:
     Verifies that the commitment is of polynomial degree < degree_bound.
     """

-    assert (
-        bls.Pairing(proof, G2_SETUP[0])
-        == bls.Pairing(commitment, G2_SETUP[-degree_bound])
-    )
+    assert bls.Pairing(proof, G2_SETUP[0]) == bls.Pairing(commitment, G2_SETUP[-degree_bound])
 ```

@@ -56,14 +56,20 @@ privacy.

 ```python
 def get_validator_row_subnets(validator: Validator, epoch: Epoch) -> List[uint64]:
-    return [int.from_bytes(hash_tree_root([validator.pubkey, 0, i])) for i in range(VALIDATOR_SAMPLE_ROW_COUNT)]
+    return [
+        int.from_bytes(hash_tree_root([validator.pubkey, 0, i]))
+        for i in range(VALIDATOR_SAMPLE_ROW_COUNT)
+    ]
 ```

 ### `get_validator_column_subnets`

 ```python
 def get_validator_column_subnets(validator: Validator, epoch: Epoch) -> List[uint64]:
-    return [int.from_bytes(hash_tree_root([validator.pubkey, 1, i])) for i in range(VALIDATOR_SAMPLE_COLUMN_COUNT)]
+    return [
+        int.from_bytes(hash_tree_root([validator.pubkey, 1, i]))
+        for i in range(VALIDATOR_SAMPLE_COLUMN_COUNT)
+    ]
 ```
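
Both functions derive subnet assignments deterministically from the validator's pubkey plus an axis tag (0 for rows, 1 for columns) and a counter, so any peer can recompute and check them. A rough standalone sketch, with `sha256` standing in for the SSZ `hash_tree_root` and all constants below being illustrative stand-ins:

```python
from hashlib import sha256

VALIDATOR_SAMPLE_ROW_COUNT = 2     # toy values, not spec constants
VALIDATOR_SAMPLE_COLUMN_COUNT = 2
SUBNET_COUNT = 64                  # illustrative subnet space

def sample_subnets(pubkey: bytes, axis: int, count: int) -> list[int]:
    return [
        int.from_bytes(sha256(pubkey + bytes([axis, i])).digest(), "little") % SUBNET_COUNT
        for i in range(count)
    ]

pubkey = b"\x11" * 48
print("rows:   ", sample_subnets(pubkey, 0, VALIDATOR_SAMPLE_ROW_COUNT))
print("columns:", sample_subnets(pubkey, 1, VALIDATOR_SAMPLE_COLUMN_COUNT))
# The same pubkey always yields the same subnets, so assignments are verifiable.
```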

 ### `reconstruct_polynomial`

@@ -73,7 +79,6 @@ def reconstruct_polynomial(samples: List[SignedShardSample]) -> List[SignedShard
     """
     Reconstructs one full row/column from at least 1/2 of the samples
     """
-
 ```

 ## Sample verification

@@ -92,13 +97,20 @@ def verify_sample(state: BeaconState, block: BeaconBlock, sample: SignedShardSam
     # signing_root = compute_signing_root(sample, get_domain(state, DOMAIN_SHARD_SAMPLE))
     # assert bls.Verify(sample.builder, signing_root, sample.signature)

-    roots_in_rbo = list_to_reverse_bit_order(roots_of_unity(SAMPLES_PER_BLOB * FIELD_ELEMENTS_PER_SAMPLE))
+    roots_in_rbo = list_to_reverse_bit_order(
+        roots_of_unity(SAMPLES_PER_BLOB * FIELD_ELEMENTS_PER_SAMPLE)
+    )

     # Verify KZG proof
-    verify_kzg_multiproof(block.body.payload_data.value.sharded_commitments_container.sharded_commitments[sample.row],
-                          roots_in_rbo[sample.column * FIELD_ELEMENTS_PER_SAMPLE:(sample.column + 1) * FIELD_ELEMENTS_PER_SAMPLE],
-                          sample.data,
-                          sample.proof)
+    verify_kzg_multiproof(
+        block.body.payload_data.value.sharded_commitments_container.sharded_commitments[sample.row],
+        roots_in_rbo[
+            sample.column * FIELD_ELEMENTS_PER_SAMPLE : (sample.column + 1)
+            * FIELD_ELEMENTS_PER_SAMPLE
+        ],
+        sample.data,
+        sample.proof,
+    )
 ```

 # Beacon chain responsibilities

@@ -164,7 +164,9 @@ class ExecutionWitness(Container):
 ##### `process_execution_payload`

 ```python
-def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
+def process_execution_payload(
+    state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
+) -> None:
     payload = body.execution_payload

     # Verify consistency of the parent hash with respect to the previous execution payload header

@@ -180,7 +182,9 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
     # Verify the execution payload is valid
     # Pass `versioned_hashes` to Execution Engine
     # Pass `parent_beacon_block_root` to Execution Engine
-    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
+    versioned_hashes = [
+        kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments
+    ]
     assert execution_engine.verify_and_notify_new_payload(
         NewPayloadRequest(
             execution_payload=payload,

@@ -97,53 +97,42 @@ def upgrade_to_eip6800(pre: deneb.BeaconState) -> BeaconState:
         block_hash=pre.latest_execution_payload_header.block_hash,
         transactions_root=pre.latest_execution_payload_header.transactions_root,
         withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
-        execution_witness_root=hash_tree_root(ExecutionWitness([], []))  # New in eip6800
+        # [New in EIP6800]
+        execution_witness_root=hash_tree_root(ExecutionWitness([], [])),
     )
     post = BeaconState(
         # Versioning
         genesis_time=pre.genesis_time,
         genesis_validators_root=pre.genesis_validators_root,
         slot=pre.slot,
         fork=Fork(
             previous_version=pre.fork.current_version,
-            current_version=EIP6800_FORK_VERSION,  # [Modified in eip6800]
+            # [Modified in EIP6800]
+            current_version=EIP6800_FORK_VERSION,
             epoch=epoch,
         ),
         # History
         latest_block_header=pre.latest_block_header,
         block_roots=pre.block_roots,
         state_roots=pre.state_roots,
         historical_roots=pre.historical_roots,
         # Eth1
         eth1_data=pre.eth1_data,
         eth1_data_votes=pre.eth1_data_votes,
         eth1_deposit_index=pre.eth1_deposit_index,
         # Registry
         validators=pre.validators,
         balances=pre.balances,
         # Randomness
         randao_mixes=pre.randao_mixes,
         # Slashings
         slashings=pre.slashings,
         # Participation
         previous_epoch_participation=pre.previous_epoch_participation,
         current_epoch_participation=pre.current_epoch_participation,
         # Finality
         justification_bits=pre.justification_bits,
         previous_justified_checkpoint=pre.previous_justified_checkpoint,
         current_justified_checkpoint=pre.current_justified_checkpoint,
         finalized_checkpoint=pre.finalized_checkpoint,
         # Inactivity
         inactivity_scores=pre.inactivity_scores,
         # Sync
         current_sync_committee=pre.current_sync_committee,
         next_sync_committee=pre.next_sync_committee,
         # Execution-layer
         latest_execution_payload_header=latest_execution_payload_header,
         # Withdrawals
         next_withdrawal_index=pre.next_withdrawal_index,
         next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
         # Deep history valid from Capella onwards
         historical_summaries=pre.historical_summaries,
     )

@@ -44,10 +44,7 @@ def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) ->
     """
     Check if ``validator`` index can be re-assigned to a new deposit.
     """
-    return (
-        epoch > validator.withdrawable_epoch + SAFE_EPOCHS_TO_REUSE_INDEX
-        and balance == 0
-    )
+    return epoch > validator.withdrawable_epoch + SAFE_EPOCHS_TO_REUSE_INDEX and balance == 0
 ```
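
The predicate above admits index reuse only once the current epoch is strictly past `withdrawable_epoch + SAFE_EPOCHS_TO_REUSE_INDEX` and the balance has been fully withdrawn. A quick runnable illustration (the constant and the minimal `Validator` stand-in below are toy assumptions):

```python
from dataclasses import dataclass

SAFE_EPOCHS_TO_REUSE_INDEX = 2  # illustrative value only

@dataclass
class Validator:
    withdrawable_epoch: int

def is_reusable_validator(validator: Validator, balance: int, epoch: int) -> bool:
    return epoch > validator.withdrawable_epoch + SAFE_EPOCHS_TO_REUSE_INDEX and balance == 0

v = Validator(withdrawable_epoch=10)
assert not is_reusable_validator(v, balance=0, epoch=12)  # too soon: 12 is not > 12
assert not is_reusable_validator(v, balance=1, epoch=13)  # funds remain
assert is_reusable_validator(v, balance=0, epoch=13)      # safe to reuse
```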

 ## Beacon chain state transition function

@@ -103,9 +103,11 @@ all related data structures and verifier code (along with tests) is specified in
 repository.

 ```python
-def IsValidWhiskShuffleProof(pre_shuffle_trackers: Sequence[WhiskTracker],
-                             post_shuffle_trackers: Sequence[WhiskTracker],
-                             shuffle_proof: WhiskShuffleProof) -> bool:
+def IsValidWhiskShuffleProof(
+    pre_shuffle_trackers: Sequence[WhiskTracker],
+    post_shuffle_trackers: Sequence[WhiskTracker],
+    shuffle_proof: WhiskShuffleProof,
+) -> bool:
     """
     Verify `post_shuffle_trackers` is a permutation of `pre_shuffle_trackers`.
     Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/blob/dev/curdleproofs/curdleproofs/whisk_interface.py.

@@ -119,9 +121,9 @@ def IsValidWhiskShuffleProof(pre_shuffle_trackers: Sequence[WhiskTracker],
 ```

 ```python
-def IsValidWhiskOpeningProof(tracker: WhiskTracker,
-                             k_commitment: BLSG1Point,
-                             tracker_proof: WhiskTrackerProof) -> bool:
+def IsValidWhiskOpeningProof(
+    tracker: WhiskTracker, k_commitment: BLSG1Point, tracker_proof: WhiskTrackerProof
+) -> bool:
     """
     Verify knowledge of `k` such that `tracker.k_r_G == k * tracker.r_G` and `k_commitment == k * BLS_G1_GENERATOR`.
     Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/blob/dev/curdleproofs/curdleproofs/whisk_interface.py.

@@ -185,12 +187,12 @@ class BeaconState(Container):
 def select_whisk_proposer_trackers(state: BeaconState, epoch: Epoch) -> None:
     # Select proposer trackers from candidate trackers
     proposer_seed = get_seed(
-        state,
-        Epoch(saturating_sub(epoch, PROPOSER_SELECTION_GAP)),
-        DOMAIN_PROPOSER_SELECTION
+        state, Epoch(saturating_sub(epoch, PROPOSER_SELECTION_GAP)), DOMAIN_PROPOSER_SELECTION
     )
     for i in range(PROPOSER_TRACKERS_COUNT):
-        index = compute_shuffled_index(uint64(i), uint64(len(state.whisk_candidate_trackers)), proposer_seed)
+        index = compute_shuffled_index(
+            uint64(i), uint64(len(state.whisk_candidate_trackers)), proposer_seed
+        )
         state.whisk_proposer_trackers[i] = state.whisk_candidate_trackers[index]
 ```

@@ -200,14 +202,18 @@ def select_whisk_candidate_trackers(state: BeaconState, epoch: Epoch) -> None:
     active_validator_indices = get_active_validator_indices(state, epoch)
     for i in range(CANDIDATE_TRACKERS_COUNT):
         seed = hash(get_seed(state, epoch, DOMAIN_CANDIDATE_SELECTION) + uint_to_bytes(uint64(i)))
-        candidate_index = compute_proposer_index(state, active_validator_indices, seed)  # sample by effective balance
+        candidate_index = compute_proposer_index(
+            state, active_validator_indices, seed
+        )  # sample by effective balance
         state.whisk_candidate_trackers[i] = state.whisk_trackers[candidate_index]
 ```

 ```python
 def process_whisk_updates(state: BeaconState) -> None:
     next_epoch = Epoch(get_current_epoch(state) + 1)
-    if next_epoch % EPOCHS_PER_SHUFFLING_PHASE == 0:  # select trackers at the start of shuffling phases
+    if (
+        next_epoch % EPOCHS_PER_SHUFFLING_PHASE == 0
+    ):  # select trackers at the start of shuffling phases
         select_whisk_proposer_trackers(state, next_epoch)
         select_whisk_candidate_trackers(state, next_epoch)
 ```

@@ -267,7 +273,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
     # Verify proposer is not slashed
     proposer = state.validators[block.proposer_index]
     assert not proposer.slashed
-    process_whisk_opening_proof(state, block) # [New in EIP7441]
+    process_whisk_opening_proof(state, block)  # [New in EIP7441]
 ```

 ### Whisk

@@ -339,7 +345,9 @@ def process_shuffled_trackers(state: BeaconState, body: BeaconBlockBody) -> None

 ```python
 def is_k_commitment_unique(state: BeaconState, k_commitment: BLSG1Point) -> bool:
-    return all([whisk_k_commitment != k_commitment for whisk_k_commitment in state.whisk_k_commitments])
+    return all(
+        [whisk_k_commitment != k_commitment for whisk_k_commitment in state.whisk_k_commitments]
+    )
 ```

 ```python

@@ -379,7 +387,9 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
 ```python
 def get_initial_whisk_k(validator_index: ValidatorIndex, counter: int) -> BLSFieldElement:
     # hash `validator_index || counter`
-    return BLSFieldElement(bytes_to_bls_field(hash(uint_to_bytes(validator_index) + uint_to_bytes(uint64(counter)))))
+    return BLSFieldElement(
+        bytes_to_bls_field(hash(uint_to_bytes(validator_index) + uint_to_bytes(uint64(counter))))
+    )
 ```

 ```python

@@ -403,10 +413,9 @@ def get_initial_tracker(k: BLSFieldElement) -> WhiskTracker:
 ```

 ```python
-def add_validator_to_registry(state: BeaconState,
-                              pubkey: BLSPubkey,
-                              withdrawal_credentials: Bytes32,
-                              amount: uint64) -> None:
+def add_validator_to_registry(
+    state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
+) -> None:
     index = get_index_for_new_validator(state)
     validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
     set_or_append_list(state.validators, index, validator)

@@ -427,6 +436,8 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
     """
     Return the beacon proposer index at the current slot.
    """
-    assert state.latest_block_header.slot == state.slot  # sanity check `process_block_header` has been called
+    assert (
+        state.latest_block_header.slot == state.slot
+    )  # sanity check `process_block_header` has been called
     return state.latest_block_header.proposer_index
 ```

@@ -55,13 +55,15 @@ all the setup ourselves in `upgrade_to_whisk()` below.
 ```python
 def upgrade_to_eip7441(pre: capella.BeaconState) -> BeaconState:
     # Compute initial unsafe trackers for all validators
-    ks = [get_initial_whisk_k(ValidatorIndex(validator_index), 0) for validator_index in range(len(pre.validators))]
+    ks = [
+        get_initial_whisk_k(ValidatorIndex(validator_index), 0)
+        for validator_index in range(len(pre.validators))
+    ]
     whisk_k_commitments = [get_k_commitment(k) for k in ks]
     whisk_trackers = [get_initial_tracker(k) for k in ks]

     epoch = get_current_epoch(pre)
     post = BeaconState(
         # Versioning
         genesis_time=pre.genesis_time,
         genesis_validators_root=pre.genesis_validators_root,
         slot=pre.slot,

@@ -70,47 +72,38 @@ def upgrade_to_eip7441(pre: capella.BeaconState) -> BeaconState:
             current_version=EIP7441_FORK_VERSION,
             epoch=epoch,
         ),
         # History
         latest_block_header=pre.latest_block_header,
         block_roots=pre.block_roots,
         state_roots=pre.state_roots,
         historical_roots=pre.historical_roots,
         # Eth1
         eth1_data=pre.eth1_data,
         eth1_data_votes=pre.eth1_data_votes,
         eth1_deposit_index=pre.eth1_deposit_index,
         # Registry
         validators=[],
         balances=pre.balances,
         # Randomness
         randao_mixes=pre.randao_mixes,
         # Slashings
         slashings=pre.slashings,
         # Participation
         previous_epoch_participation=pre.previous_epoch_participation,
         current_epoch_participation=pre.current_epoch_participation,
         # Finality
         justification_bits=pre.justification_bits,
         previous_justified_checkpoint=pre.previous_justified_checkpoint,
         current_justified_checkpoint=pre.current_justified_checkpoint,
         finalized_checkpoint=pre.finalized_checkpoint,
         # Inactivity
         inactivity_scores=pre.inactivity_scores,
         # Sync
         current_sync_committee=pre.current_sync_committee,
         next_sync_committee=pre.next_sync_committee,
         # Execution-layer
         latest_execution_payload_header=pre.latest_execution_payload_header,
         # Withdrawals
         next_withdrawal_index=pre.next_withdrawal_index,
         next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
         # Deep history valid from Capella onwards
         historical_summaries=pre.historical_summaries,
         # Whisk
-        whisk_proposer_trackers=[WhiskTracker() for _ in range(PROPOSER_TRACKERS_COUNT)],  # [New in EIP7441]
-        whisk_candidate_trackers=[WhiskTracker() for _ in range(CANDIDATE_TRACKERS_COUNT)],  # [New in EIP7441]
-        whisk_trackers=whisk_trackers,  # [New in EIP7441]
-        whisk_k_commitments=whisk_k_commitments,  # [New in EIP7441]
+        # [New in EIP7441]
+        whisk_proposer_trackers=[WhiskTracker() for _ in range(PROPOSER_TRACKERS_COUNT)],
+        # [New in EIP7441]
+        whisk_candidate_trackers=[WhiskTracker() for _ in range(CANDIDATE_TRACKERS_COUNT)],
+        # [New in EIP7441]
+        whisk_trackers=whisk_trackers,
+        # [New in EIP7441]
+        whisk_k_commitments=whisk_k_commitments,
     )

     # Do a candidate selection followed by a proposer selection so that we have proposers for the upcoming day
@@ -326,8 +326,8 @@ def remove_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlag

```python
def is_valid_indexed_payload_attestation(
        state: BeaconState,
        indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
    state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
) -> bool:
    """
    Check if ``indexed_payload_attestation`` is not empty, has sorted and unique indices and has
    a valid aggregate signature.
@@ -395,7 +395,10 @@ def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[V
    for index in committee_indices:
        committee = get_beacon_committee(state, attestation.data.slot, index)
        committee_attesters = set(
            index for i, index in enumerate(committee) if attestation.aggregation_bits[committee_offset + i])
            index
            for i, index in enumerate(committee)
            if attestation.aggregation_bits[committee_offset + i]
        )
        output = output.union(committee_attesters)
        committee_offset += len(committee)

@@ -408,8 +411,9 @@ def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[V
#### `get_payload_attesting_indices`

```python
def get_payload_attesting_indices(state: BeaconState, slot: Slot,
                                  payload_attestation: PayloadAttestation) -> Set[ValidatorIndex]:
def get_payload_attesting_indices(
    state: BeaconState, slot: Slot, payload_attestation: PayloadAttestation
) -> Set[ValidatorIndex]:
    """
    Return the set of attesting indices corresponding to ``payload_attestation``.
    """
@@ -420,8 +424,9 @@ def get_payload_attesting_indices(state: BeaconState, slot: Slot,
#### `get_indexed_payload_attestation`

```python
def get_indexed_payload_attestation(state: BeaconState, slot: Slot,
                                    payload_attestation: PayloadAttestation) -> IndexedPayloadAttestation:
def get_indexed_payload_attestation(
    state: BeaconState, slot: Slot, payload_attestation: PayloadAttestation
) -> IndexedPayloadAttestation:
    """
    Return the indexed payload attestation corresponding to ``payload_attestation``.
    """
@@ -491,7 +496,9 @@ def process_withdrawals(state: BeaconState) -> None:
        decrease_balance(state, withdrawal.validator_index, withdrawal.amount)

    # Update pending partial withdrawals
    state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
    state.pending_partial_withdrawals = state.pending_partial_withdrawals[
        partial_withdrawals_count:
    ]

    # Update the next withdrawal index if this block contained withdrawals
    if len(withdrawals) != 0:
@@ -501,7 +508,9 @@ def process_withdrawals(state: BeaconState) -> None:
    # Update the next validator index to start the next withdrawal sweep
    if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
        # Next sweep starts after the latest withdrawal's validator index
        next_validator_index = ValidatorIndex((withdrawals[-1].validator_index + 1) % len(state.validators))
        next_validator_index = ValidatorIndex(
            (withdrawals[-1].validator_index + 1) % len(state.validators)
        )
        state.next_withdrawal_validator_index = next_validator_index
    else:
        # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
@@ -515,11 +524,14 @@ def process_withdrawals(state: BeaconState) -> None:
##### New `verify_execution_payload_header_signature`

```python
def verify_execution_payload_header_signature(state: BeaconState,
                                              signed_header: SignedExecutionPayloadHeader) -> bool:
def verify_execution_payload_header_signature(
    state: BeaconState, signed_header: SignedExecutionPayloadHeader
) -> bool:
    # Check the signature
    builder = state.validators[signed_header.message.builder_index]
    signing_root = compute_signing_root(signed_header.message, get_domain(state, DOMAIN_BEACON_BUILDER))
    signing_root = compute_signing_root(
        signed_header.message, get_domain(state, DOMAIN_BEACON_BUILDER)
    )
    return bls.Verify(builder.pubkey, signing_root, signed_header.signature)
```

@@ -563,7 +575,9 @@ def process_execution_payload_header(state: BeaconState, block: BeaconBlock) ->
```python
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # Verify that outstanding deposits are processed up to the maximum number of deposits
    assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
    assert len(body.deposits) == min(
        MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index
    )

    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
        for operation in operations:
@@ -586,7 +600,9 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
###### `process_payload_attestation`

```python
def process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation) -> None:
def process_payload_attestation(
    state: BeaconState, payload_attestation: PayloadAttestation
) -> None:
    # Check that the attestation is for the parent beacon block
    data = payload_attestation.data
    assert data.beacon_block_root == state.latest_block_header.parent_root
@@ -594,7 +610,9 @@ def process_payload_attestation(state: BeaconState, payload_attestation: Payload
    assert data.slot + 1 == state.slot

    # Verify signature
    indexed_payload_attestation = get_indexed_payload_attestation(state, data.slot, payload_attestation)
    indexed_payload_attestation = get_indexed_payload_attestation(
        state, data.slot, payload_attestation
    )
    assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)

    if state.slot % SLOTS_PER_EPOCH == 0:
@@ -605,7 +623,9 @@ def process_payload_attestation(state: BeaconState, payload_attestation: Payload
    # Return early if the attestation is for the wrong payload status
    payload_was_present = data.slot == state.latest_full_slot
    voted_present = data.payload_status == PAYLOAD_PRESENT
    proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
    proposer_reward_denominator = (
        (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
    )
    proposer_index = get_beacon_proposer_index(state)
    if voted_present != payload_was_present:
        # Unset the flags in case they were set by an equivocating ptc attestation
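# Editorial note: a worked check of the denominator above, assuming the
# Altair mainnet weights WEIGHT_DENOMINATOR = 64 and PROPOSER_WEIGHT = 8:
#     (64 - 8) * 64 // 8 == 448
assert (64 - 8) * 64 // 8 == 448
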
@@ -664,7 +684,10 @@ def validate_merge_block(block: BeaconBlock) -> None:
    if TERMINAL_BLOCK_HASH != Hash32():
        # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
        assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
        assert block.body.signed_execution_payload_header.message.parent_block_hash == TERMINAL_BLOCK_HASH
        assert (
            block.body.signed_execution_payload_header.message.parent_block_hash
            == TERMINAL_BLOCK_HASH
        )
        return

    # Modified in EIP-7732
@@ -684,9 +707,12 @@ def validate_merge_block(block: BeaconBlock) -> None:

```python
def verify_execution_payload_envelope_signature(
        state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope) -> bool:
    state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
) -> bool:
    builder = state.validators[signed_envelope.message.builder_index]
    signing_root = compute_signing_root(signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER))
    signing_root = compute_signing_root(
        signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
    )
    return bls.Verify(builder.pubkey, signing_root, signed_envelope.signature)
```

@@ -697,9 +723,12 @@ transition. It is called when importing a signed execution payload proposed by
the builder of the current slot.

```python
def process_execution_payload(state: BeaconState,
                              signed_envelope: SignedExecutionPayloadEnvelope,
                              execution_engine: ExecutionEngine, verify: bool = True) -> None:
def process_execution_payload(
    state: BeaconState,
    signed_envelope: SignedExecutionPayloadEnvelope,
    execution_engine: ExecutionEngine,
    verify: bool = True,
) -> None:
    # Verify signature
    if verify:
        assert verify_execution_payload_envelope_signature(state, signed_envelope)
@@ -716,7 +745,9 @@ def process_execution_payload(state: BeaconState,
    # Verify consistency with the committed header
    committed_header = state.latest_execution_payload_header
    assert envelope.builder_index == committed_header.builder_index
    assert committed_header.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments)
    assert committed_header.blob_kzg_commitments_root == hash_tree_root(
        envelope.blob_kzg_commitments
    )

    if not envelope.payload_withheld:
        # Verify the withdrawals root
@@ -735,8 +766,10 @@ def process_execution_payload(state: BeaconState,
    # Verify commitments are under limit
    assert len(envelope.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK
    # Verify the execution payload is valid
    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment)
                        for commitment in envelope.blob_kzg_commitments]
    versioned_hashes = [
        kzg_commitment_to_versioned_hash(commitment)
        for commitment in envelope.blob_kzg_commitments
    ]
    requests = envelope.execution_requests
    assert execution_engine.verify_and_notify_new_payload(
        NewPayloadRequest(

@@ -70,7 +70,8 @@ using

```python
def get_execution_payload_header_signature(
        state: BeaconState, header: ExecutionPayloadHeader, privkey: int) -> BLSSignature:
    state: BeaconState, header: ExecutionPayloadHeader, privkey: int
) -> BLSSignature:
    domain = get_domain(state, DOMAIN_BEACON_BUILDER, compute_epoch_at_slot(header.slot))
    signing_root = compute_signing_root(header, domain)
    return bls.Sign(privkey, signing_root)
@@ -91,10 +92,12 @@ included in the beacon block but rather in the `ExecutionPayloadEnvelope`, the
builder has to send the commitments as parameters to this function.

```python
def get_blob_sidecars(signed_block: SignedBeaconBlock,
                      blobs: Sequence[Blob],
                      blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
                      blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]:
def get_blob_sidecars(
    signed_block: SignedBeaconBlock,
    blobs: Sequence[Blob],
    blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
    blob_kzg_proofs: Sequence[KZGProof],
) -> Sequence[BlobSidecar]:
    block = signed_block.message
    block_header = BeaconBlockHeader(
        slot=block.slot,
@@ -103,7 +106,9 @@ def get_blob_sidecars(signed_block: SignedBeaconBlock,
        state_root=block.state_root,
        body_root=hash_tree_root(block.body),
    )
    signed_block_header = SignedBeaconBlockHeader(message=block_header, signature=signed_block.signature)
    signed_block_header = SignedBeaconBlockHeader(
        message=block_header, signature=signed_block.signature
    )
    sidecars: List[BlobSidecar] = []
    for index, blob in enumerate(blobs):
        proof = compute_merkle_proof(
@@ -126,7 +131,7 @@ def get_blob_sidecars(signed_block: SignedBeaconBlock,
                kzg_commitment=blob_kzg_commitments[index],
                kzg_proof=blob_kzg_proofs[index],
                signed_block_header=signed_block_header,
                kzg_commitment_inclusion_proof=proof
                kzg_commitment_inclusion_proof=proof,
            )
        )
    return sidecars
@@ -165,7 +170,8 @@ After preparing the `envelope` the builder should sign the envelope using:

```python
def get_execution_payload_envelope_signature(
        state: BeaconState, envelope: ExecutionPayloadEnvelope, privkey: int) -> BLSSignature:
    state: BeaconState, envelope: ExecutionPayloadEnvelope, privkey: int
) -> BLSSignature:
    domain = get_domain(state, DOMAIN_BEACON_BUILDER, compute_epoch_at_slot(state.slot))
    signing_root = compute_signing_root(envelope, domain)
    return bls.Sign(privkey, signing_root)

@@ -84,10 +84,14 @@ equivocating. Notice also that target epoch number and slot number are validated
on `validate_on_attestation`.

```python
def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
def update_latest_messages(
    store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation
) -> None:
    slot = attestation.data.slot
    beacon_block_root = attestation.data.beacon_block_root
    non_equivocating_attesting_indices = [i for i in attesting_indices if i not in store.equivocating_indices]
    non_equivocating_attesting_indices = [
        i for i in attesting_indices if i not in store.equivocating_indices
    ]
    for i in non_equivocating_attesting_indices:
        if i not in store.latest_messages or slot > store.latest_messages[i].slot:
            store.latest_messages[i] = LatestMessage(slot=slot, root=beacon_block_root)
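# Editorial note: the latest-message rule is easy to exercise in isolation.
# A toy sketch with plain dicts (hypothetical indices and roots, not spec
# types): an entry is replaced only by a strictly newer slot, and
# equivocating indices are skipped entirely.
latest_messages = {1: (10, "0xaa")}
equivocating_indices = {2}

def update(indices, slot, root):
    for i in indices:
        if i in equivocating_indices:
            continue  # equivocators never update the store
        if i not in latest_messages or slot > latest_messages[i][0]:
            latest_messages[i] = (slot, root)

update([1, 2, 3], 11, "0xbb")
assert latest_messages == {1: (11, "0xbb"), 3: (11, "0xbb")}
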
@@ -119,7 +123,9 @@ class Store(object):
    checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
    latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
    unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
    execution_payload_states: Dict[Root, BeaconState] = field(default_factory=dict)  # [New in EIP-7732]
    execution_payload_states: Dict[Root, BeaconState] = field(
        default_factory=dict
    )  # [New in EIP-7732]
    ptc_vote: Dict[Root, Vector[uint8, PTC_SIZE]] = field(default_factory=dict)  # [New in EIP-7732]
```

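The `field(default_factory=dict)` pattern used throughout `Store` is what gives every store instance its own mutable containers. A minimal illustration with a toy dataclass (not the spec's `Store`):

```python
from dataclasses import dataclass, field
from typing import Dict

@dataclass
class ToyStore:
    # Each instance gets a fresh dict; a shared mutable default would leak
    # state between stores (and is disallowed by dataclasses anyway).
    latest_messages: Dict[int, int] = field(default_factory=dict)

a, b = ToyStore(), ToyStore()
a.latest_messages[1] = 7
assert b.latest_messages == {}
```
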
@@ -157,7 +163,9 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
### `notify_ptc_messages`

```python
def notify_ptc_messages(store: Store, state: BeaconState, payload_attestations: Sequence[PayloadAttestation]) -> None:
def notify_ptc_messages(
    store: Store, state: BeaconState, payload_attestations: Sequence[PayloadAttestation]
) -> None:
    """
    Extracts a list of ``PayloadAttestationMessage`` from ``payload_attestations`` and updates the store with them
    These Payload attestations are assumed to be in the beacon block hence signature verification is not needed
@@ -165,7 +173,9 @@ def notify_ptc_messages(store: Store, state: BeaconState, payload_attestations:
    if state.slot == 0:
        return
    for payload_attestation in payload_attestations:
        indexed_payload_attestation = get_indexed_payload_attestation(state, Slot(state.slot - 1), payload_attestation)
        indexed_payload_attestation = get_indexed_payload_attestation(
            state, Slot(state.slot - 1), payload_attestation
        )
        for idx in indexed_payload_attestation.attesting_indices:
            on_payload_attestation_message(
                store,
@@ -173,8 +183,8 @@ def notify_ptc_messages(store: Store, state: BeaconState, payload_attestations:
                    validator_index=idx,
                    data=payload_attestation.data,
                    signature=BLSSignature(),
                    is_from_block=True
                )
                    is_from_block=True,
                ),
            )
```

@@ -220,7 +230,11 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> ChildNode:
    parent = store.blocks[block.parent_root]
    if parent.slot > slot:
        return get_ancestor(store, block.parent_root, slot)
    return ChildNode(root=block.parent_root, slot=parent.slot, is_payload_present=is_parent_node_full(store, block))
    return ChildNode(
        root=block.parent_root,
        slot=parent.slot,
        is_payload_present=is_parent_node_full(store, block),
    )
```

### Modified `get_checkpoint_block`
@@ -274,7 +288,9 @@ def compute_proposer_boost(store: Store, state: BeaconState, node: ChildNode) ->
    # Proposer boost is not applied after skipped slots
    if node.slot > proposer_boost_slot:
        return Gwei(0)
    if (node.slot < proposer_boost_slot) and (ancestor.is_payload_present != node.is_payload_present):
    if (node.slot < proposer_boost_slot) and (
        ancestor.is_payload_present != node.is_payload_present
    ):
        return Gwei(0)
    committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
    return (committee_weight * PROPOSER_SCORE_BOOST_EIP7732) // 100
@@ -333,15 +349,21 @@ the block was full or not. `Slot` is needed for a correct implementation of
def get_weight(store: Store, node: ChildNode) -> Gwei:
    state = store.checkpoint_states[store.justified_checkpoint]
    unslashed_and_active_indices = [
        i for i in get_active_validator_indices(state, get_current_epoch(state))
        i
        for i in get_active_validator_indices(state, get_current_epoch(state))
        if not state.validators[i].slashed
    ]
    attestation_score = Gwei(sum(
        state.validators[i].effective_balance for i in unslashed_and_active_indices
        if (i in store.latest_messages
            and i not in store.equivocating_indices
            and is_supporting_vote(store, node, store.latest_messages[i]))
    ))
    attestation_score = Gwei(
        sum(
            state.validators[i].effective_balance
            for i in unslashed_and_active_indices
            if (
                i in store.latest_messages
                and i not in store.equivocating_indices
                and is_supporting_vote(store, node, store.latest_messages[i])
            )
        )
    )

    # Compute boosts
    proposer_score = compute_proposer_boost(store, state, node)
@@ -365,33 +387,47 @@ def get_head(store: Store) -> ChildNode:
    justified_block = store.blocks[justified_root]
    justified_slot = justified_block.slot
    justified_full = is_payload_present(store, justified_root)
    best_child = ChildNode(root=justified_root, slot=justified_slot, is_payload_present=justified_full)
    best_child = ChildNode(
        root=justified_root, slot=justified_slot, is_payload_present=justified_full
    )
    while True:
        children = [
            ChildNode(root=root, slot=block.slot, is_payload_present=present) for (root, block) in blocks.items()
            if block.parent_root == best_child.root and block.slot > best_child.slot and
            (best_child.root == justified_root or is_parent_node_full(store, block) == best_child.is_payload_present)
            for present in (True, False) if root in store.execution_payload_states or not present
            ChildNode(root=root, slot=block.slot, is_payload_present=present)
            for (root, block) in blocks.items()
            if block.parent_root == best_child.root
            and block.slot > best_child.slot
            and (
                best_child.root == justified_root
                or is_parent_node_full(store, block) == best_child.is_payload_present
            )
            for present in (True, False)
            if root in store.execution_payload_states or not present
        ]
        if len(children) == 0:
            return best_child
        # if we have children we consider the current head advanced as a possible head
        highest_child_slot = max(child.slot for child in children)
        children += [
            ChildNode(root=best_child.root, slot=best_child.slot + 1, is_payload_present=best_child.is_payload_present)
            ChildNode(
                root=best_child.root,
                slot=best_child.slot + 1,
                is_payload_present=best_child.is_payload_present,
            )
        ]
        # Sort by latest attesting balance with
        # Ties broken by the block's slot
        # Ties are broken by the PTC vote
        # Ties are then broken by favoring full blocks
        # Ties then broken by favoring block with lexicographically higher root
        new_best_child = max(children, key=lambda child: (
            get_weight(store, child),
            blocks[child.root].slot,
            is_payload_present(store, child.root),
            child.is_payload_present,
            child.root
        )
        new_best_child = max(
            children,
            key=lambda child: (
                get_weight(store, child),
                blocks[child.root].slot,
                is_payload_present(store, child.root),
                child.is_payload_present,
                child.root,
            ),
        )
        if new_best_child.root == best_child.root and new_best_child.slot >= highest_child_slot:
            return new_best_child
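# Editorial note: the tie-breaking comments above map onto Python's
# lexicographic tuple comparison inside `max`. A toy illustration with
# made-up (weight, slot, ptc_vote, is_full, root) tuples:
candidates = [
    (100, 7, True, True, "0x02"),
    (100, 7, True, True, "0x0a"),  # equal until the root; higher root wins
    (90, 9, True, True, "0xff"),   # a higher slot cannot beat a lower weight
]
assert max(candidates) == (100, 7, True, True, "0x0a")
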
@@ -536,14 +572,17 @@ def on_tick_per_slot(store: Store, time: uint64) -> None:

    # If a new epoch, pull-up justification and finalization from previous epoch
    if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0:
        update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint)
        update_checkpoints(
            store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint
        )
```

### `on_payload_attestation_message`

```python
def on_payload_attestation_message(
        store: Store, ptc_message: PayloadAttestationMessage, is_from_block: bool=False) -> None:
    store: Store, ptc_message: PayloadAttestationMessage, is_from_block: bool = False
) -> None:
    """
    Run ``on_payload_attestation_message`` upon receiving a new ``ptc_message`` directly on the wire.
    """
@@ -568,8 +607,8 @@ def on_payload_attestation_message(
            IndexedPayloadAttestation(
                attesting_indices=[ptc_message.validator_index],
                data=data,
                signature=ptc_message.signature
            )
                signature=ptc_message.signature,
            ),
        )
    # Update the ptc vote for the block
    ptc_index = ptc.index(ptc_message.validator_index)
@@ -608,7 +647,10 @@ def validate_merge_block(block: BeaconBlock) -> None:
    if TERMINAL_BLOCK_HASH != Hash32():
        # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
        assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
        assert block.body.signed_execution_payload_header.message.parent_block_hash == TERMINAL_BLOCK_HASH
        assert (
            block.body.signed_execution_payload_header.message.parent_block_hash
            == TERMINAL_BLOCK_HASH
        )
        return

    pow_block = get_pow_block(block.body.signed_execution_payload_header.message.parent_block_hash)
@@ -619,7 +661,4 @@ def validate_merge_block(block: BeaconBlock) -> None:
    assert pow_parent is not None
    # Check if `pow_block` is a valid terminal PoW block
    assert is_valid_terminal_pow_block(pow_block, pow_parent)



```

@@ -72,50 +72,39 @@ def upgrade_to_eip7732(pre: electra.BeaconState) -> BeaconState:
    epoch = electra.get_current_epoch(pre)

    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=EIP7732_FORK_VERSION,  # [Modified in EIP-7732]
            # [Modified in EIP-7732]
            current_version=EIP7732_FORK_VERSION,
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=ExecutionPayloadHeader(),  # [Modified in EIP-7732]
        # Withdrawals
        # [Modified in EIP-7732]
        latest_execution_payload_header=ExecutionPayloadHeader(),
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
        deposit_requests_start_index=pre.deposit_requests_start_index,
        deposit_balance_to_consume=pre.deposit_balance_to_consume,
@@ -126,10 +115,12 @@ def upgrade_to_eip7732(pre: electra.BeaconState) -> BeaconState:
        pending_deposits=pre.pending_deposits,
        pending_partial_withdrawals=pre.pending_partial_withdrawals,
        pending_consolidations=pre.pending_consolidations,
        # ePBS
        latest_block_hash=pre.latest_execution_payload_header.block_hash,  # [New in EIP-7732]
        latest_full_slot=pre.slot,  # [New in EIP-7732]
        latest_withdrawals_root=Root(),  # [New in EIP-7732]
        # [New in EIP-7732]
        latest_block_hash=pre.latest_execution_payload_header.block_hash,
        # [New in EIP-7732]
        latest_full_slot=pre.slot,
        # [New in EIP-7732]
        latest_withdrawals_root=Root(),
    )

    return post

@@ -82,8 +82,7 @@ no longer in the beacon block body.
```python
def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool:
    inner_gindex = get_generalized_index(
        List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
        blob_sidecar.index
        List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK], blob_sidecar.index
    )
    outer_gindex = get_generalized_index(
        BeaconBlockBody,

@@ -35,9 +35,8 @@ next epoch.

```python
def get_ptc_assignment(
        state: BeaconState,
        epoch: Epoch,
        validator_index: ValidatorIndex) -> Optional[Slot]:
    state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex
) -> Optional[Slot]:
    """
    Returns the slot during the requested epoch in which the validator with index `validator_index`
    is a member of the PTC. Returns None if no assignment is found.
@@ -184,7 +183,8 @@ attestations as described above.

```python
def get_payload_attestation_message_signature(
        state: BeaconState, attestation: PayloadAttestationMessage, privkey: int) -> BLSSignature:
    state: BeaconState, attestation: PayloadAttestationMessage, privkey: int
) -> BLSSignature:
    domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
    signing_root = compute_signing_root(attestation.data, domain)
    return bls.Sign(privkey, signing_root)

@@ -82,8 +82,8 @@ class SignedInclusionList(Container):

```python
def is_valid_inclusion_list_signature(
        state: BeaconState,
        signed_inclusion_list: SignedInclusionList) -> bool:
    state: BeaconState, signed_inclusion_list: SignedInclusionList
) -> bool:
    """
    Check if ``signed_inclusion_list`` has a valid signature.
    """
@@ -100,8 +100,9 @@ def is_valid_inclusion_list_signature(
#### New `get_inclusion_list_committee`

```python
def get_inclusion_list_committee(state: BeaconState,
                                 slot: Slot) -> Vector[ValidatorIndex, INCLUSION_LIST_COMMITTEE_SIZE]:
def get_inclusion_list_committee(
    state: BeaconState, slot: Slot
) -> Vector[ValidatorIndex, INCLUSION_LIST_COMMITTEE_SIZE]:
    epoch = compute_epoch_at_slot(slot)
    seed = get_seed(state, epoch, DOMAIN_INCLUSION_LIST_COMMITTEE)
    indices = get_active_validator_indices(state, epoch)
@@ -139,11 +140,13 @@ class NewPayloadRequest(object):
`inclusion_list_transactions`.

```python
def is_valid_block_hash(self: ExecutionEngine,
                        execution_payload: ExecutionPayload,
                        parent_beacon_block_root: Root,
                        execution_requests_list: Sequence[bytes],
                        inclusion_list_transactions: Sequence[Transaction]) -> bool:
def is_valid_block_hash(
    self: ExecutionEngine,
    execution_payload: ExecutionPayload,
    parent_beacon_block_root: Root,
    execution_requests_list: Sequence[bytes],
    inclusion_list_transactions: Sequence[Transaction],
) -> bool:
    """
    Return ``True`` if and only if ``execution_payload.block_hash`` is computed correctly.
    """
@@ -156,11 +159,13 @@ def is_valid_block_hash(self: ExecutionEngine,
`inclusion_list_transactions`.

```python
def notify_new_payload(self: ExecutionEngine,
                       execution_payload: ExecutionPayload,
                       parent_beacon_block_root: Root,
                       execution_requests_list: Sequence[bytes],
                       inclusion_list_transactions: Sequence[Transaction]) -> bool:
def notify_new_payload(
    self: ExecutionEngine,
    execution_payload: ExecutionPayload,
    parent_beacon_block_root: Root,
    execution_requests_list: Sequence[bytes],
    inclusion_list_transactions: Sequence[Transaction],
) -> bool:
    """
    Return ``True`` if and only if ``execution_payload`` and ``execution_requests_list``
    are valid with respect to ``self.execution_state``.
@@ -178,23 +183,24 @@ additional parameter `inclusion_list_transactions` when calling
`notify_new_payload` in EIP-7805.

```python
def verify_and_notify_new_payload(self: ExecutionEngine,
                                  new_payload_request: NewPayloadRequest) -> bool:
def verify_and_notify_new_payload(
    self: ExecutionEngine, new_payload_request: NewPayloadRequest
) -> bool:
    """
    Return ``True`` if and only if ``new_payload_request`` is valid with respect to ``self.execution_state``.
    """
    execution_payload = new_payload_request.execution_payload
    parent_beacon_block_root = new_payload_request.parent_beacon_block_root
    execution_requests_list = get_execution_requests_list(new_payload_request.execution_requests)
    inclusion_list_transactions = new_payload_request.inclusion_list_transactions  # [New in EIP-7805]
    # [New in EIP-7805]
    inclusion_list_transactions = new_payload_request.inclusion_list_transactions

    if b'' in execution_payload.transactions:
    if b"" in execution_payload.transactions:
        return False

    if not self.is_valid_block_hash(
        execution_payload,
        parent_beacon_block_root,
        execution_requests_list):
        execution_payload, parent_beacon_block_root, execution_requests_list
    ):
        return False

    if not self.is_valid_versioned_hashes(new_payload_request):
@@ -202,10 +208,11 @@ def verify_and_notify_new_payload(self: ExecutionEngine,

    # [Modified in EIP-7805]
    if not self.notify_new_payload(
        execution_payload,
        parent_beacon_block_root,
        execution_requests_list,
        inclusion_list_transactions):
        execution_payload,
        parent_beacon_block_root,
        execution_requests_list,
        inclusion_list_transactions,
    ):
        return False

    return True
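# Editorial note: the function above is an early-return validation pipeline,
# with each predicate required to pass before the next runs. A stand-in
# sketch with toy checks (not the engine API) shows the control flow:
def run_checks(checks) -> bool:
    for check in checks:
        if not check():
            return False
    return True

assert run_checks([lambda: True, lambda: True])
assert not run_checks([lambda: True, lambda: False])
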
@@ -214,7 +221,9 @@ def verify_and_notify_new_payload(self: ExecutionEngine,
##### Modified `process_execution_payload`

```python
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(
    state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
) -> None:
    payload = body.execution_payload

    # Verify consistency of the parent hash with respect to the previous execution payload header
@@ -226,7 +235,9 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
    # Verify commitments are under limit
    assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA
    # Verify the execution payload is valid
    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
    versioned_hashes = [
        kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments
    ]
    # Verify inclusion list transactions
    inclusion_list_transactions: Sequence[Transaction] = []  # TODO: where do we get this?
    # Verify the payload with the execution engine

@@ -56,7 +56,9 @@ class Store(object):
    unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
    # [New in EIP-7805]
    inclusion_lists: Dict[Tuple[Slot, Root], Set[InclusionList]] = field(default_factory=dict)
    inclusion_list_equivocators: Dict[Tuple[Slot, Root], Set[ValidatorIndex]] = field(default_factory=dict)
    inclusion_list_equivocators: Dict[Tuple[Slot, Root], Set[ValidatorIndex]] = field(
        default_factory=dict
    )
    unsatisfied_inclusion_list_blocks: Set[Root] = field(default_factory=Set)
```

@@ -91,16 +93,20 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
#### New `validate_inclusion_lists`

```python
def validate_inclusion_lists(_store: Store,
                             inclusion_list_transactions: Sequence[Transaction],
                             execution_payload: ExecutionPayload) -> None:
def validate_inclusion_lists(
    _store: Store,
    inclusion_list_transactions: Sequence[Transaction],
    execution_payload: ExecutionPayload,
) -> None:
    """
    The ``execution_payload`` satisfies ``inclusion_list_transactions`` validity conditions either
    when all transactions are present in payload or when any missing transactions are found to be
    invalid when appended to the end of the payload unless the block is full.
    """
    # Verify inclusion list transactions are present in the execution payload
    contains_all_txs = all(tx in execution_payload.transactions for tx in inclusion_list_transactions)
    contains_all_txs = all(
        tx in execution_payload.transactions for tx in inclusion_list_transactions
    )
    if contains_all_txs:
        return

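# Editorial note: the fast path above is plain containment. A quick check
# with byte strings standing in for SSZ Transaction values:
payload_txs = [b"\x01", b"\x02", b"\x03"]
il_txs = [b"\x02", b"\x03"]
assert all(tx in payload_txs for tx in il_txs)
# A missing transaction falls through to the append-and-revalidate path.
assert not all(tx in payload_txs for tx in il_txs + [b"\x04"])
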
@@ -116,7 +122,6 @@ def get_attester_head(store: Store, head_root: Root) -> Root:
    if head_root in store.unsatisfied_inclusion_list_blocks:
        return head_block.parent_root
    return head_root

```

##### Modified `get_proposer_head`
@@ -157,11 +162,22 @@ def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root:
    # Check that the missing votes are assigned to the parent and not being hoarded.
    parent_strong = is_parent_strong(store, parent_root)

    reorg_prerequisites = all([shuffling_stable, ffg_competitive, finalization_ok,
                               proposing_on_time, single_slot_reorg, head_weak, parent_strong])
    reorg_prerequisites = all(
        [
            shuffling_stable,
            ffg_competitive,
            finalization_ok,
            proposing_on_time,
            single_slot_reorg,
            head_weak,
            parent_strong,
        ]
    )

    # Check that the head block is in the unsatisfied inclusion list blocks
    inclusion_list_not_satisfied = head_root in store.unsatisfied_inclusion_list_blocks  # [New in EIP-7805]
    inclusion_list_not_satisfied = (
        head_root in store.unsatisfied_inclusion_list_blocks
    )  # [New in EIP-7805]

    if reorg_prerequisites and (head_late or inclusion_list_not_satisfied):
        return parent_root
@@ -176,10 +192,11 @@ choice store.

```python
def on_inclusion_list(
        store: Store,
        state: BeaconState,
        signed_inclusion_list: SignedInclusionList,
        inclusion_list_committee: Vector[ValidatorIndex, INCLUSION_LIST_COMMITTEE_SIZE]) -> None:
    store: Store,
    state: BeaconState,
    signed_inclusion_list: SignedInclusionList,
    inclusion_list_committee: Vector[ValidatorIndex, INCLUSION_LIST_COMMITTEE_SIZE],
) -> None:
    """
    Verify the inclusion list and import it into the fork choice store. If there exists more than
    one inclusion list in the store with the same slot and validator index, add the equivocator to
@@ -209,13 +226,18 @@ def on_inclusion_list(
    # Verify inclusion list signature
    assert is_valid_inclusion_list_signature(state, signed_inclusion_list)

    is_before_freeze_deadline = get_current_slot(store) == message.slot and time_into_slot < VIEW_FREEZE_DEADLINE
    is_before_freeze_deadline = (
        get_current_slot(store) == message.slot and time_into_slot < VIEW_FREEZE_DEADLINE
    )

    # Do not process inclusion lists from known equivocators
    if validator_index not in store.inclusion_list_equivocators[(message.slot, root)]:
        if validator_index in [il.validator_index for il in store.inclusion_lists[(message.slot, root)]]:
        if validator_index in [
            il.validator_index for il in store.inclusion_lists[(message.slot, root)]
        ]:
            validator_inclusion_list = [
                il for il in store.inclusion_lists[(message.slot, root)]
                il
                for il in store.inclusion_lists[(message.slot, root)]
                if il.validator_index == validator_index
            ][0]
            if validator_inclusion_list != message:

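# Editorial note: a loose toy model of the equivocation bookkeeping above
# (plain tuples instead of InclusionList objects): a second, conflicting
# list from the same validator under the same (slot, root) key marks that
# validator as an equivocator.
inclusion_lists = {(5, "0xaa"): {(7, b"txs-1")}}  # {(slot, root): {(validator, payload)}}
equivocators = {(5, "0xaa"): set()}

def on_list(slot, root, validator, payload):
    key = (slot, root)
    if validator in equivocators[key]:
        return  # drop messages from known equivocators
    existing = dict(inclusion_lists[key])
    if validator in existing and existing[validator] != payload:
        equivocators[key].add(validator)
        return
    inclusion_lists[key].add((validator, payload))

on_list(5, "0xaa", 7, b"txs-2")  # conflicts with the stored txs-1
assert 7 in equivocators[(5, "0xaa")]
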
@@ -66,54 +66,40 @@ def upgrade_to_eip7805(pre: electra.BeaconState) -> BeaconState:
    epoch = electra.get_current_epoch(pre)

    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=EIP7805_FORK_VERSION,  # [Modified in EIP-7805]
            # [Modified in EIP-7805]
            current_version=EIP7805_FORK_VERSION,
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=pre.latest_execution_payload_header,
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
        # On-chain deposits
        deposit_requests_start_index=pre.deposit_requests_start_index,
        # Consolidations
        deposit_balance_to_consume=pre.deposit_balance_to_consume,
        exit_balance_to_consume=pre.exit_balance_to_consume,
        earliest_exit_epoch=pre.earliest_exit_epoch,

@@ -69,9 +69,8 @@ current and next epoch.

```python
def get_inclusion_committee_assignment(
        state: BeaconState,
        epoch: Epoch,
        validator_index: ValidatorIndex) -> Optional[Slot]:
    state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex
) -> Optional[Slot]:
    """
    Returns the slot during the requested epoch in which the validator with index ``validator_index``
    is a member of the ILC. Returns None if no assignment is found.
@@ -137,8 +136,11 @@ The validator creates the `signed_inclusion_list` as follows:

```python
def get_inclusion_list_signature(
        state: BeaconState, inclusion_list: InclusionList, privkey: int) -> BLSSignature:
    domain = get_domain(state, DOMAIN_INCLUSION_LIST_COMMITTEE, compute_epoch_at_slot(inclusion_list.slot))
    state: BeaconState, inclusion_list: InclusionList, privkey: int
) -> BLSSignature:
    domain = get_domain(
        state, DOMAIN_INCLUSION_LIST_COMMITTEE, compute_epoch_at_slot(inclusion_list.slot)
    )
    signing_root = compute_signing_root(inclusion_list, domain)
    return bls.Sign(privkey, signing_root)
```
@@ -154,11 +156,13 @@ Set `attestation_data.beacon_block_root = get_attester_head(store, head_root)`.
#### Modified beacon block root

```python
def get_sync_committee_message(state: BeaconState,
                               block_root: Root,
                               validator_index: ValidatorIndex,
                               privkey: int,
                               store: Store) -> SyncCommitteeMessage:
def get_sync_committee_message(
    state: BeaconState,
    block_root: Root,
    validator_index: ValidatorIndex,
    privkey: int,
    store: Store,
) -> SyncCommitteeMessage:
    epoch = get_current_epoch(state)
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch)
    signing_root = compute_signing_root(block_root, domain)

@@ -278,7 +278,9 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd
    i = 0
    sync_committee_indices: List[ValidatorIndex] = []
    while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
        shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
        shuffled_index = compute_shuffled_index(
            uint64(i % active_validator_count), active_validator_count, seed
        )
        candidate_index = active_validator_indices[shuffled_index]
        random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
        effective_balance = state.validators[candidate_index].effective_balance
@@ -309,7 +311,11 @@ def get_next_sync_committee(state: BeaconState) -> SyncCommittee:

```python
def get_base_reward_per_increment(state: BeaconState) -> Gwei:
    return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)))
    return Gwei(
        EFFECTIVE_BALANCE_INCREMENT
        * BASE_REWARD_FACTOR
        // integer_squareroot(get_total_active_balance(state))
    )
```

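For scale, a worked evaluation of this formula, assuming the mainnet constants `EFFECTIVE_BALANCE_INCREMENT = 10**9` Gwei and `BASE_REWARD_FACTOR = 64`, with a hypothetical 32M ETH of total active balance (`math.isqrt` stands in for the spec's `integer_squareroot`):

```python
from math import isqrt

EFFECTIVE_BALANCE_INCREMENT = 10**9  # Gwei
BASE_REWARD_FACTOR = 64
total_active_balance = 32_000_000 * 10**9  # 32M ETH, in Gwei

per_increment = (
    EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // isqrt(total_active_balance)
)
assert per_increment == 357  # isqrt(3.2e16) == 178_885_438
```
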
#### `get_base_reward`
@@ -332,7 +338,9 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
#### `get_unslashed_participating_indices`

```python
def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epoch: Epoch) -> Set[ValidatorIndex]:
def get_unslashed_participating_indices(
    state: BeaconState, flag_index: int, epoch: Epoch
) -> Set[ValidatorIndex]:
    """
    Return the set of validator indices that are both active and unslashed for the given ``flag_index`` and ``epoch``.
    """
@@ -342,16 +350,18 @@ def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epo
    else:
        epoch_participation = state.previous_epoch_participation
    active_validator_indices = get_active_validator_indices(state, epoch)
    participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
    participating_indices = [
        i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)
    ]
    return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
```

#### `get_attestation_participation_flag_indices`

```python
def get_attestation_participation_flag_indices(state: BeaconState,
                                               data: AttestationData,
                                               inclusion_delay: uint64) -> Sequence[int]:
def get_attestation_participation_flag_indices(
    state: BeaconState, data: AttestationData, inclusion_delay: uint64
) -> Sequence[int]:
    """
    Return the flag indices that are satisfied by an attestation.
    """
@@ -362,8 +372,12 @@ def get_attestation_participation_flag_indices(state: BeaconState,

    # Matching roots
    is_matching_source = data.source == justified_checkpoint
    is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
    is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
    is_matching_target = is_matching_source and data.target.root == get_block_root(
        state, data.target.epoch
    )
    is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(
        state, data.slot
    )
    assert is_matching_source

    participation_flag_indices = []
@@ -380,17 +394,23 @@ def get_attestation_participation_flag_indices(state: BeaconState,
#### `get_flag_index_deltas`

```python
def get_flag_index_deltas(state: BeaconState, flag_index: int) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
def get_flag_index_deltas(
    state: BeaconState, flag_index: int
) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Return the deltas for a given ``flag_index`` by scanning through the participation flags.
    """
    rewards = [Gwei(0)] * len(state.validators)
    penalties = [Gwei(0)] * len(state.validators)
    previous_epoch = get_previous_epoch(state)
    unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, previous_epoch)
    unslashed_participating_indices = get_unslashed_participating_indices(
        state, flag_index, previous_epoch
    )
    weight = PARTICIPATION_FLAG_WEIGHTS[flag_index]
    unslashed_participating_balance = get_total_balance(state, unslashed_participating_indices)
    unslashed_participating_increments = unslashed_participating_balance // EFFECTIVE_BALANCE_INCREMENT
    unslashed_participating_increments = (
        unslashed_participating_balance // EFFECTIVE_BALANCE_INCREMENT
    )
    active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
    for index in get_eligible_validator_indices(state):
        base_reward = get_base_reward(state, index)
@@ -413,10 +433,14 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
    rewards = [Gwei(0) for _ in range(len(state.validators))]
    penalties = [Gwei(0) for _ in range(len(state.validators))]
    previous_epoch = get_previous_epoch(state)
    matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
    matching_target_indices = get_unslashed_participating_indices(
        state, TIMELY_TARGET_FLAG_INDEX, previous_epoch
    )
    for index in get_eligible_validator_indices(state):
        if index not in matching_target_indices:
            penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
            penalty_numerator = (
                state.validators[index].effective_balance * state.inactivity_scores[index]
            )
            penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR
            penalties[index] += Gwei(penalty_numerator // penalty_denominator)
    return rewards, penalties
@@ -431,9 +455,9 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
calculating the proposer reward.

```python
def slash_validator(state: BeaconState,
                    slashed_index: ValidatorIndex,
                    whistleblower_index: ValidatorIndex=None) -> None:
def slash_validator(
    state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
    """
    Slash the validator with index ``slashed_index``.
    """
@@ -441,9 +465,13 @@ def slash_validator(state: BeaconState,
    initiate_validator_exit(state, slashed_index)
    validator = state.validators[slashed_index]
    validator.slashed = True
    validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
    validator.withdrawable_epoch = max(
        validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR)
    )
    state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
    decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR)
    decrease_balance(
        state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
    )

    # Apply proposer and whistleblower rewards
    proposer_index = get_beacon_proposer_index(state)
@@ -483,7 +511,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    assert len(attestation.aggregation_bits) == len(committee)

    # Participation flag indices
    participation_flag_indices = get_attestation_participation_flag_indices(state, data, state.slot - data.slot)
    participation_flag_indices = get_attestation_participation_flag_indices(
        state, data, state.slot - data.slot
    )

    # Verify signature
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
@@ -497,12 +527,16 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    proposer_reward_numerator = 0
    for index in get_attesting_indices(state, attestation):
        for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
            if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
            if flag_index in participation_flag_indices and not has_flag(
                epoch_participation[index], flag_index
            ):
                epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
                proposer_reward_numerator += get_base_reward(state, index) * weight

    # Reward proposer
    proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
    proposer_reward_denominator = (
        (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
    )
    proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
    increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```
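The `has_flag`/`add_flag` helpers used above treat `ParticipationFlags` as a small bitfield. A self-contained sketch with plain integers, assuming the Altair flag indices (source=0, target=1, head=2):

```python
def has_flag(flags: int, flag_index: int) -> bool:
    flag = 2**flag_index
    return flags & flag == flag

def add_flag(flags: int, flag_index: int) -> int:
    return flags | 2**flag_index

flags = 0
flags = add_flag(flags, 1)  # mark the timely-target flag
assert has_flag(flags, 1)
assert not has_flag(flags, 0)  # other flags stay unset
```
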
@@ -514,10 +548,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
`current_epoch_participation`.

```python
def add_validator_to_registry(state: BeaconState,
                              pubkey: BLSPubkey,
                              withdrawal_credentials: Bytes32,
                              amount: uint64) -> None:
def add_validator_to_registry(
    state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
) -> None:
    index = get_index_for_new_validator(state)
    validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
    set_or_append_list(state.validators, index, validator)
@@ -536,23 +569,35 @@ def add_validator_to_registry(state: BeaconState,
def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None:
    # Verify sync committee aggregate signature signing over the previous slot block root
    committee_pubkeys = state.current_sync_committee.pubkeys
    participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit]
    participant_pubkeys = [
        pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit
    ]
    previous_slot = max(state.slot, Slot(1)) - Slot(1)
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
    signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
    assert eth_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
    assert eth_fast_aggregate_verify(
        participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature
    )

    # Compute participant and proposer rewards
    total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
    total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
    max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
    max_participant_rewards = Gwei(
        total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH
    )
    participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE)
    proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))
    proposer_reward = Gwei(
        participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
    )

    # Apply participant and proposer rewards
    all_pubkeys = [v.pubkey for v in state.validators]
    committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
    for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits):
    committee_indices = [
        ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys
    ]
    for participant_index, participation_bit in zip(
        committee_indices, sync_aggregate.sync_committee_bits
    ):
        if participation_bit:
            increase_balance(state, participant_index, participant_reward)
            increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
@@ -589,12 +634,18 @@ def process_justification_and_finalization(state: BeaconState) -> None:
    # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
    if get_current_epoch(state) <= GENESIS_EPOCH + 1:
        return
    previous_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state))
    current_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_current_epoch(state))
    previous_indices = get_unslashed_participating_indices(
        state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)
    )
    current_indices = get_unslashed_participating_indices(
        state, TIMELY_TARGET_FLAG_INDEX, get_current_epoch(state)
    )
    total_active_balance = get_total_active_balance(state)
    previous_target_balance = get_total_balance(state, previous_indices)
    current_target_balance = get_total_balance(state, current_indices)
    weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance)
    weigh_justification_and_finalization(
        state, total_active_balance, previous_target_balance, current_target_balance
    )
```

#### Inactivity scores
@@ -609,13 +660,17 @@ def process_inactivity_updates(state: BeaconState) -> None:
|
||||
|
||||
for index in get_eligible_validator_indices(state):
|
||||
# Increase the inactivity score of inactive validators
|
||||
if index in get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)):
|
||||
if index in get_unslashed_participating_indices(
|
||||
state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)
|
||||
):
|
||||
state.inactivity_scores[index] -= min(1, state.inactivity_scores[index])
|
||||
else:
|
||||
state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS
|
||||
# Decrease the inactivity score of all eligible validators during a leak-free epoch
|
||||
if not is_in_inactivity_leak(state):
|
||||
state.inactivity_scores[index] -= min(INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index])
|
||||
state.inactivity_scores[index] -= min(
|
||||
INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index]
|
||||
)
|
||||
```
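
As a quick editorial sketch (assuming the mainnet values `INACTIVITY_SCORE_BIAS = 4` and `INACTIVITY_SCORE_RECOVERY_RATE = 16`), the per-epoch score update reduces to:

```python
def next_inactivity_score(score: int, participated: bool, in_leak: bool) -> int:
    # Participating validators shed one point; absent ones gain the bias
    if participated:
        score -= min(1, score)
    else:
        score += 4  # INACTIVITY_SCORE_BIAS (assumed)
    # Outside a leak, everyone recovers quickly
    if not in_leak:
        score -= min(16, score)  # INACTIVITY_SCORE_RECOVERY_RATE (assumed)
    return score

assert next_inactivity_score(10, participated=True, in_leak=False) == 0
assert next_inactivity_score(10, participated=False, in_leak=True) == 14
```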

#### Rewards and penalties
@@ -629,9 +684,12 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
if get_current_epoch(state) == GENESIS_EPOCH:
return

flag_deltas = [get_flag_index_deltas(state, flag_index) for flag_index in range(len(PARTICIPATION_FLAG_WEIGHTS))]
flag_deltas = [
get_flag_index_deltas(state, flag_index)
for flag_index in range(len(PARTICIPATION_FLAG_WEIGHTS))
]
deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
for (rewards, penalties) in deltas:
for rewards, penalties in deltas:
for index in range(len(state.validators)):
increase_balance(state, ValidatorIndex(index), rewards[index])
decrease_balance(state, ValidatorIndex(index), penalties[index])
@@ -646,11 +704,18 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
def process_slashings(state: BeaconState) -> None:
epoch = get_current_epoch(state)
total_balance = get_total_active_balance(state)
adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
adjusted_total_slashing_balance = min(
sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance
)
for index, validator in enumerate(state.validators):
if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
if (
validator.slashed
and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch
):
increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
penalty_numerator = (
validator.effective_balance // increment * adjusted_total_slashing_balance
)
penalty = penalty_numerator // total_balance * increment
decrease_balance(state, ValidatorIndex(index), penalty)
```
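
For intuition, a hedged numeric check of the penalty formula above, assuming `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR = 2` and a 1 ETH `EFFECTIVE_BALANCE_INCREMENT`; the factored-out increment means penalties round down to whole increments:

```python
INCREMENT = 10**9  # EFFECTIVE_BALANCE_INCREMENT in Gwei (assumed: 1 ETH)

def altair_slashing_penalty(effective_balance: int, total_slashed: int, total_balance: int) -> int:
    adjusted = min(total_slashed * 2, total_balance)  # multiplier assumed = 2
    penalty_numerator = effective_balance // INCREMENT * adjusted
    return penalty_numerator // total_balance * INCREMENT

# 32 ETH validator, 10% of a 1M ETH stake slashed: 20% of 32 ETH, floored to 6 ETH
assert altair_slashing_penalty(32 * INCREMENT, 100_000 * INCREMENT, 1_000_000 * INCREMENT) == 6 * INCREMENT
```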
@@ -662,7 +727,9 @@ def process_slashings(state: BeaconState) -> None:
```python
def process_participation_flag_updates(state: BeaconState) -> None:
state.previous_epoch_participation = state.current_epoch_participation
state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
state.current_epoch_participation = [
ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))
]
```

#### Sync committee updates

@@ -55,7 +55,9 @@ def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
### `eth_fast_aggregate_verify`

```python
def eth_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
def eth_fast_aggregate_verify(
pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature
) -> bool:
"""
Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
"""

@@ -67,12 +67,16 @@ precise fork slot to execute the upgrade in the presence of skipped slots at the
fork boundary. Instead the logic must be within `process_slots`.

```python
def translate_participation(state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation]) -> None:
def translate_participation(
state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation]
) -> None:
for attestation in pending_attestations:
data = attestation.data
inclusion_delay = attestation.inclusion_delay
# Translate attestation inclusion info to flag indices
participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay)
participation_flag_indices = get_attestation_participation_flag_indices(
state, data, inclusion_delay
)

# Apply flags to all attesting validators
epoch_participation = state.previous_epoch_participation
@@ -84,7 +88,6 @@ def translate_participation(state: BeaconState, pending_attestations: Sequence[p
def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
epoch = phase0.get_current_epoch(pre)
post = BeaconState(
# Versioning
genesis_time=pre.genesis_time,
genesis_validators_root=pre.genesis_validators_root,
slot=pre.slot,
@@ -93,31 +96,27 @@ def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
current_version=ALTAIR_FORK_VERSION,
epoch=epoch,
),
# History
latest_block_header=pre.latest_block_header,
block_roots=pre.block_roots,
state_roots=pre.state_roots,
historical_roots=pre.historical_roots,
# Eth1
eth1_data=pre.eth1_data,
eth1_data_votes=pre.eth1_data_votes,
eth1_deposit_index=pre.eth1_deposit_index,
# Registry
validators=pre.validators,
balances=pre.balances,
# Randomness
randao_mixes=pre.randao_mixes,
# Slashings
slashings=pre.slashings,
# Participation
previous_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
current_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
# Finality
previous_epoch_participation=[
ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))
],
current_epoch_participation=[
ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))
],
justification_bits=pre.justification_bits,
previous_justified_checkpoint=pre.previous_justified_checkpoint,
current_justified_checkpoint=pre.current_justified_checkpoint,
finalized_checkpoint=pre.finalized_checkpoint,
# Inactivity
inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
)
# Fill in previous epoch participation from the pre state's pending attestations

@@ -28,9 +28,7 @@ This function return the Merkle proof of the given SSZ object `object` at
generalized index `index`.

```python
def compute_merkle_proof(object: SSZObject,
index: GeneralizedIndex) -> Sequence[Bytes32]:
...
def compute_merkle_proof(object: SSZObject, index: GeneralizedIndex) -> Sequence[Bytes32]: ...
```

### `block_to_light_client_header`
@@ -61,8 +59,9 @@ To form a `LightClientBootstrap`, the following objects are needed:
- `block`: the corresponding block

```python
def create_light_client_bootstrap(state: BeaconState,
block: SignedBeaconBlock) -> LightClientBootstrap:
def create_light_client_bootstrap(
state: BeaconState, block: SignedBeaconBlock
) -> LightClientBootstrap:
assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH

assert state.slot == state.latest_block_header.slot
@@ -74,7 +73,8 @@ def create_light_client_bootstrap(state: BeaconState,
header=block_to_light_client_header(block),
current_sync_committee=state.current_sync_committee,
current_sync_committee_branch=CurrentSyncCommitteeBranch(
compute_merkle_proof(state, current_sync_committee_gindex_at_slot(state.slot))),
compute_merkle_proof(state, current_sync_committee_gindex_at_slot(state.slot))
),
)
```

@@ -106,13 +106,18 @@ needed:
unavailable, e.g., when using checkpoint sync, or if it was pruned locally)

```python
def create_light_client_update(state: BeaconState,
block: SignedBeaconBlock,
attested_state: BeaconState,
attested_block: SignedBeaconBlock,
finalized_block: Optional[SignedBeaconBlock]) -> LightClientUpdate:
def create_light_client_update(
state: BeaconState,
block: SignedBeaconBlock,
attested_state: BeaconState,
attested_block: SignedBeaconBlock,
finalized_block: Optional[SignedBeaconBlock],
) -> LightClientUpdate:
assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
assert (
sum(block.message.body.sync_aggregate.sync_committee_bits)
>= MIN_SYNC_COMMITTEE_PARTICIPANTS
)

assert state.slot == state.latest_block_header.slot
header = state.latest_block_header.copy()
@@ -123,7 +128,11 @@ def create_light_client_update(state: BeaconState,
assert attested_state.slot == attested_state.latest_block_header.slot
attested_header = attested_state.latest_block_header.copy()
attested_header.state_root = hash_tree_root(attested_state)
assert hash_tree_root(attested_header) == hash_tree_root(attested_block.message) == block.message.parent_root
assert (
hash_tree_root(attested_header)
== hash_tree_root(attested_block.message)
== block.message.parent_root
)
update_attested_period = compute_sync_committee_period_at_slot(attested_block.message.slot)

update = LightClientUpdate()
@@ -134,17 +143,24 @@ def create_light_client_update(state: BeaconState,
if update_attested_period == update_signature_period:
update.next_sync_committee = attested_state.next_sync_committee
update.next_sync_committee_branch = NextSyncCommitteeBranch(
compute_merkle_proof(attested_state, next_sync_committee_gindex_at_slot(attested_state.slot)))
compute_merkle_proof(
attested_state, next_sync_committee_gindex_at_slot(attested_state.slot)
)
)

# Indicate finality whenever possible
if finalized_block is not None:
if finalized_block.message.slot != GENESIS_SLOT:
update.finalized_header = block_to_light_client_header(finalized_block)
assert hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root
assert (
hash_tree_root(update.finalized_header.beacon)
== attested_state.finalized_checkpoint.root
)
else:
assert attested_state.finalized_checkpoint.root == Bytes32()
update.finality_branch = FinalityBranch(
compute_merkle_proof(attested_state, finalized_root_gindex_at_slot(attested_state.slot)))
compute_merkle_proof(attested_state, finalized_root_gindex_at_slot(attested_state.slot))
)

update.sync_aggregate = block.message.body.sync_aggregate
update.signature_slot = block.message.slot

@@ -249,14 +249,12 @@ def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdat

# Compare sync committee finality
if new_has_finality:
new_has_sync_committee_finality = (
compute_sync_committee_period_at_slot(new_update.finalized_header.beacon.slot)
== compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot)
)
old_has_sync_committee_finality = (
compute_sync_committee_period_at_slot(old_update.finalized_header.beacon.slot)
== compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot)
)
new_has_sync_committee_finality = compute_sync_committee_period_at_slot(
new_update.finalized_header.beacon.slot
) == compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot)
old_has_sync_committee_finality = compute_sync_committee_period_at_slot(
old_update.finalized_header.beacon.slot
) == compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot)
if new_has_sync_committee_finality != old_has_sync_committee_finality:
return new_has_sync_committee_finality

@@ -283,26 +281,28 @@ def is_next_sync_committee_known(store: LightClientStore) -> bool:

```python
def get_safety_threshold(store: LightClientStore) -> uint64:
return max(
store.previous_max_active_participants,
store.current_max_active_participants,
) // 2
return (
max(
store.previous_max_active_participants,
store.current_max_active_participants,
)
// 2
)
```
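
A one-line editorial check of the integer arithmetic: with 384 participants seen in the previous period and 512 in the current, the threshold is half the larger count.

```python
assert max(384, 512) // 2 == 256
```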

### `get_subtree_index`

```python
def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64:
return uint64(generalized_index % 2**(floorlog2(generalized_index)))
return uint64(generalized_index % 2 ** (floorlog2(generalized_index)))
```
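
Taking `gindex mod 2^floorlog2(gindex)` strips the leading bit of the generalized index, leaving the node's offset within its tree level; a small editorial check, with a local stand-in for the spec's `floorlog2` helper:

```python
def floorlog2_sketch(x: int) -> int:
    # Stand-in for the spec's floorlog2 helper
    return x.bit_length() - 1

gindex = 0b110110  # 54, at depth 5
assert gindex % 2 ** floorlog2_sketch(gindex) == 0b10110  # 22: leading bit removed
```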

### `is_valid_normalized_merkle_branch`

```python
def is_valid_normalized_merkle_branch(leaf: Bytes32,
branch: Sequence[Bytes32],
gindex: GeneralizedIndex,
root: Root) -> bool:
def is_valid_normalized_merkle_branch(
leaf: Bytes32, branch: Sequence[Bytes32], gindex: GeneralizedIndex, root: Root
) -> bool:
depth = floorlog2(gindex)
index = get_subtree_index(gindex)
num_extra = len(branch) - depth
@@ -329,8 +329,9 @@ with a received `LightClientBootstrap` derived from a given
### `initialize_light_client_store`

```python
def initialize_light_client_store(trusted_block_root: Root,
bootstrap: LightClientBootstrap) -> LightClientStore:
def initialize_light_client_store(
trusted_block_root: Root, bootstrap: LightClientBootstrap
) -> LightClientStore:
assert is_valid_light_client_header(bootstrap.header)
assert hash_tree_root(bootstrap.header.beacon) == trusted_block_root

@@ -371,10 +372,12 @@ def initialize_light_client_store(trusted_block_root: Root,
### `validate_light_client_update`

```python
def validate_light_client_update(store: LightClientStore,
update: LightClientUpdate,
current_slot: Slot,
genesis_validators_root: Root) -> None:
def validate_light_client_update(
store: LightClientStore,
update: LightClientUpdate,
current_slot: Slot,
genesis_validators_root: Root,
) -> None:
# Verify sync committee has sufficient participants
sync_aggregate = update.sync_aggregate
assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
@@ -397,8 +400,7 @@ def validate_light_client_update(store: LightClientStore,
is_sync_committee_update(update) and update_attested_period == store_period
)
assert (
update_attested_slot > store.finalized_header.beacon.slot
or update_has_next_sync_committee
update_attested_slot > store.finalized_header.beacon.slot or update_has_next_sync_committee
)

# Verify that the `finality_branch`, if present, confirms `finalized_header`
@@ -440,14 +442,17 @@ def validate_light_client_update(store: LightClientStore,
else:
sync_committee = store.next_sync_committee
participant_pubkeys = [
pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys)
pubkey
for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys)
if bit
]
fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1)
fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot))
domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root)
signing_root = compute_signing_root(update.attested_header.beacon, domain)
assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
assert bls.FastAggregateVerify(
participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature
)
```

### `apply_light_client_update`
@@ -455,7 +460,9 @@ def validate_light_client_update(store: LightClientStore,
```python
def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot)
update_finalized_period = compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot)
update_finalized_period = compute_sync_committee_period_at_slot(
update.finalized_header.beacon.slot
)
if not is_next_sync_committee_known(store):
assert update_finalized_period == store_period
store.next_sync_committee = update.next_sync_committee
@@ -482,7 +489,10 @@ def process_light_client_store_force_update(store: LightClientStore, current_slo
# Because the apply logic waits for `finalized_header.beacon.slot` to indicate sync committee finality,
# the `attested_header` may be treated as `finalized_header` in extended periods of non-finality
# to guarantee progression into later sync committee periods according to `is_better_update`.
if store.best_valid_update.finalized_header.beacon.slot <= store.finalized_header.beacon.slot:
if (
store.best_valid_update.finalized_header.beacon.slot
<= store.finalized_header.beacon.slot
):
store.best_valid_update.finalized_header = store.best_valid_update.attested_header
apply_light_client_update(store, store.best_valid_update)
store.best_valid_update = None
@@ -491,19 +501,18 @@ def process_light_client_store_force_update(store: LightClientStore, current_slo
### `process_light_client_update`

```python
def process_light_client_update(store: LightClientStore,
update: LightClientUpdate,
current_slot: Slot,
genesis_validators_root: Root) -> None:
def process_light_client_update(
store: LightClientStore,
update: LightClientUpdate,
current_slot: Slot,
genesis_validators_root: Root,
) -> None:
validate_light_client_update(store, update, current_slot, genesis_validators_root)

sync_committee_bits = update.sync_aggregate.sync_committee_bits

# Update the best update in case we have to force-update to it if the timeout elapses
if (
store.best_valid_update is None
or is_better_update(update, store.best_valid_update)
):
if store.best_valid_update is None or is_better_update(update, store.best_valid_update):
store.best_valid_update = update

# Track the maximum number of active participants in the committee signatures
@@ -522,17 +531,16 @@ def process_light_client_update(store: LightClientStore,
# Update finalized header
update_has_finalized_next_sync_committee = (
not is_next_sync_committee_known(store)
and is_sync_committee_update(update) and is_finality_update(update) and (
and is_sync_committee_update(update)
and is_finality_update(update)
and (
compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot)
== compute_sync_committee_period_at_slot(update.attested_header.beacon.slot)
)
)
if (
sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
and (
update.finalized_header.beacon.slot > store.finalized_header.beacon.slot
or update_has_finalized_next_sync_committee
)
if sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2 and (
update.finalized_header.beacon.slot > store.finalized_header.beacon.slot
or update_has_finalized_next_sync_committee
):
# Normal update through 2/3 threshold
apply_light_client_update(store, update)
@@ -542,10 +550,12 @@ def process_light_client_update(store: LightClientStore,
### `process_light_client_finality_update`

```python
def process_light_client_finality_update(store: LightClientStore,
finality_update: LightClientFinalityUpdate,
current_slot: Slot,
genesis_validators_root: Root) -> None:
def process_light_client_finality_update(
store: LightClientStore,
finality_update: LightClientFinalityUpdate,
current_slot: Slot,
genesis_validators_root: Root,
) -> None:
update = LightClientUpdate(
attested_header=finality_update.attested_header,
next_sync_committee=SyncCommittee(),
@@ -561,10 +571,12 @@ def process_light_client_finality_update(store: LightClientStore,
### `process_light_client_optimistic_update`

```python
def process_light_client_optimistic_update(store: LightClientStore,
optimistic_update: LightClientOptimisticUpdate,
current_slot: Slot,
genesis_validators_root: Root) -> None:
def process_light_client_optimistic_update(
store: LightClientStore,
optimistic_update: LightClientOptimisticUpdate,
current_slot: Slot,
genesis_validators_root: Root,
) -> None:
update = LightClientUpdate(
attested_header=optimistic_update.attested_header,
next_sync_committee=SyncCommittee(),

@@ -145,11 +145,15 @@ The following validations MUST pass before forwarding the
`get_sync_subcommittee_pubkeys` for convenience:

```python
def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
def get_sync_subcommittee_pubkeys(
state: BeaconState, subcommittee_index: uint64
) -> Sequence[BLSPubkey]:
# Committees assigned to `slot` sign for `slot - 1`
# This creates the exceptional logic below when transitioning between sync committee periods
next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(
next_slot_epoch
):
sync_committee = state.current_sync_committee
else:
sync_committee = state.next_sync_committee
@@ -157,7 +161,7 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64
# Return pubkeys for the subcommittee index
sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
i = subcommittee_index * sync_subcommittee_size
return sync_committee.pubkeys[i:i + sync_subcommittee_size]
return sync_committee.pubkeys[i : i + sync_subcommittee_size]
```
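
An editorial sketch of the slice arithmetic, assuming the mainnet sizes `SYNC_COMMITTEE_SIZE = 512` and `SYNC_COMMITTEE_SUBNET_COUNT = 4`:

```python
SYNC_COMMITTEE_SIZE = 512
SYNC_COMMITTEE_SUBNET_COUNT = 4

# 128 pubkeys per subcommittee; subcommittee 2 covers indices 256..383
sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
i = 2 * sync_subcommittee_size
assert (sync_subcommittee_size, i, i + sync_subcommittee_size) == (128, 256, 384)
```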

- _[IGNORE]_ The contribution's slot is for the current slot (with a

@@ -154,9 +154,9 @@ def compute_sync_committee_period(epoch: Epoch) -> uint64:
```

```python
def is_assigned_to_sync_committee(state: BeaconState,
epoch: Epoch,
validator_index: ValidatorIndex) -> bool:
def is_assigned_to_sync_committee(
state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex
) -> bool:
sync_committee_period = compute_sync_committee_period(epoch)
current_epoch = get_current_epoch(state)
current_sync_committee_period = compute_sync_committee_period(current_epoch)
@@ -257,8 +257,9 @@ Given a collection of the best seen `contributions` (with no repeating
proposer processes them as follows:

```python
def process_sync_committee_contributions(block: BeaconBlock,
contributions: Set[SyncCommitteeContribution]) -> None:
def process_sync_committee_contributions(
block: BeaconBlock, contributions: Set[SyncCommitteeContribution]
) -> None:
sync_aggregate = SyncAggregate()
signatures = []
sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
@@ -332,10 +333,9 @@ the index of the validator in the registry `state.validators` controlled by
`privkey`, and `privkey` is the BLS private key for the validator.

```python
def get_sync_committee_message(state: BeaconState,
block_root: Root,
validator_index: ValidatorIndex,
privkey: int) -> SyncCommitteeMessage:
def get_sync_committee_message(
state: BeaconState, block_root: Root, validator_index: ValidatorIndex, privkey: int
) -> SyncCommitteeMessage:
epoch = get_current_epoch(state)
domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch)
signing_root = compute_signing_root(block_root, domain)
@@ -364,19 +364,27 @@ index is included multiple times in a given sync committee across multiple
subcommittees.

```python
def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[SubnetID]:
def compute_subnets_for_sync_committee(
state: BeaconState, validator_index: ValidatorIndex
) -> Set[SubnetID]:
next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(
next_slot_epoch
):
sync_committee = state.current_sync_committee
else:
sync_committee = state.next_sync_committee

target_pubkey = state.validators[validator_index].pubkey
sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey]
return set([
SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
for index in sync_committee_indices
])
sync_committee_indices = [
index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey
]
return set(
[
SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
for index in sync_committee_indices
]
)
```
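
A small editorial check of the subnet mapping, with the same assumed mainnet sizes as above: a validator whose pubkey occupies committee slots 5 and 300 (duplicates across subcommittees are possible) is assigned to subnets 0 and 2.

```python
SYNC_COMMITTEE_SIZE = 512
SYNC_COMMITTEE_SUBNET_COUNT = 4

sync_committee_indices = [5, 300]
subnets = {
    index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)
    for index in sync_committee_indices
}
assert subnets == {0, 2}
```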

*Note*: Subnet assignment does not change during the duration of a validator's
@@ -403,10 +411,9 @@ period), the `subcommittee_index` equal to the `subnet_id`, and the `privkey` is
the BLS private key associated with the validator.

```python
def get_sync_committee_selection_proof(state: BeaconState,
slot: Slot,
subcommittee_index: uint64,
privkey: int) -> BLSSignature:
def get_sync_committee_selection_proof(
state: BeaconState, slot: Slot, subcommittee_index: uint64, privkey: int
) -> BLSSignature:
domain = get_domain(state, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, compute_epoch_at_slot(slot))
signing_data = SyncAggregatorSelectionData(
slot=slot,
@@ -418,7 +425,12 @@ def get_sync_committee_selection_proof(state: BeaconState,

```python
def is_sync_committee_aggregator(signature: BLSSignature) -> bool:
modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
modulo = max(
1,
SYNC_COMMITTEE_SIZE
// SYNC_COMMITTEE_SUBNET_COUNT
// TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE,
)
return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
```
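
With the assumed mainnet constants (`SYNC_COMMITTEE_SIZE = 512`, `SYNC_COMMITTEE_SUBNET_COUNT = 4`, `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 16`) the modulo works out to 8, i.e. each of the 128 subcommittee members aggregates with probability 1/8, giving 16 expected aggregators per subnet:

```python
assert max(1, 512 // 4 // 16) == 8  # selection modulo
assert (512 // 4) / 8 == 16.0       # expected aggregators per subcommittee
```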

@@ -503,10 +515,12 @@ First,
is constructed.

```python
def get_contribution_and_proof(state: BeaconState,
aggregator_index: ValidatorIndex,
contribution: SyncCommitteeContribution,
privkey: int) -> ContributionAndProof:
def get_contribution_and_proof(
state: BeaconState,
aggregator_index: ValidatorIndex,
contribution: SyncCommitteeContribution,
privkey: int,
) -> ContributionAndProof:
selection_proof = get_sync_committee_selection_proof(
state,
contribution.slot,
@@ -525,11 +539,13 @@ Then
is constructed and broadcast. Where `signature` is obtained from:

```python
def get_contribution_and_proof_signature(state: BeaconState,
contribution_and_proof: ContributionAndProof,
privkey: int) -> BLSSignature:
def get_contribution_and_proof_signature(
state: BeaconState, contribution_and_proof: ContributionAndProof, privkey: int
) -> BLSSignature:
contribution = contribution_and_proof.contribution
domain = get_domain(state, DOMAIN_CONTRIBUTION_AND_PROOF, compute_epoch_at_slot(contribution.slot))
domain = get_domain(
state, DOMAIN_CONTRIBUTION_AND_PROOF, compute_epoch_at_slot(contribution.slot)
)
signing_root = compute_signing_root(contribution_and_proof, domain)
return bls.Sign(privkey, signing_root)
```

@@ -250,10 +250,14 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
rewards = [Gwei(0) for _ in range(len(state.validators))]
penalties = [Gwei(0) for _ in range(len(state.validators))]
previous_epoch = get_previous_epoch(state)
matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
matching_target_indices = get_unslashed_participating_indices(
state, TIMELY_TARGET_FLAG_INDEX, previous_epoch
)
for index in get_eligible_validator_indices(state):
if index not in matching_target_indices:
penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
penalty_numerator = (
state.validators[index].effective_balance * state.inactivity_scores[index]
)
# [Modified in Bellatrix]
penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_BELLATRIX
penalties[index] += Gwei(penalty_numerator // penalty_denominator)
@@ -268,9 +272,9 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
`MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX`.

```python
def slash_validator(state: BeaconState,
slashed_index: ValidatorIndex,
whistleblower_index: ValidatorIndex=None) -> None:
def slash_validator(
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
"""
Slash the validator with index ``slashed_index``.
"""
@@ -278,9 +282,12 @@ def slash_validator(state: BeaconState,
initiate_validator_exit(state, slashed_index)
validator = state.validators[slashed_index]
validator.slashed = True
validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
validator.withdrawable_epoch = max(
validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR)
)
state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX  # [Modified in Bellatrix]
# [Modified in Bellatrix]
slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX
decrease_balance(state, slashed_index, slashing_penalty)

# Apply proposer and whistleblower rewards
@@ -346,14 +353,15 @@ def is_valid_block_hash(self: ExecutionEngine, execution_payload: ExecutionPaylo
#### `verify_and_notify_new_payload`

```python
def verify_and_notify_new_payload(self: ExecutionEngine,
new_payload_request: NewPayloadRequest) -> bool:
def verify_and_notify_new_payload(
self: ExecutionEngine, new_payload_request: NewPayloadRequest
) -> bool:
"""
Return ``True`` if and only if ``new_payload_request`` is valid with respect to ``self.execution_state``.
"""
execution_payload = new_payload_request.execution_payload

if b'' in execution_payload.transactions:
if b"" in execution_payload.transactions:
return False

if not self.is_valid_block_hash(execution_payload):
@@ -387,7 +395,9 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
##### `process_execution_payload`

```python
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(
state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
) -> None:
payload = body.execution_payload

# Verify consistency of the parent hash with respect to the previous execution payload header
@@ -398,7 +408,9 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
# Verify timestamp
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
# Verify the execution payload is valid
assert execution_engine.verify_and_notify_new_payload(NewPayloadRequest(execution_payload=payload))
assert execution_engine.verify_and_notify_new_payload(
NewPayloadRequest(execution_payload=payload)
)
# Cache execution payload header
state.latest_execution_payload_header = ExecutionPayloadHeader(
parent_hash=payload.parent_hash,
@@ -430,13 +442,19 @@ def process_slashings(state: BeaconState) -> None:
epoch = get_current_epoch(state)
total_balance = get_total_active_balance(state)
adjusted_total_slashing_balance = min(
sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX,  # [Modified in Bellatrix]
total_balance
sum(state.slashings)
* PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX,  # [Modified in Bellatrix]
total_balance,
)
for index, validator in enumerate(state.validators):
if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
if (
validator.slashed
and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch
):
increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
penalty_numerator = (
validator.effective_balance // increment * adjusted_total_slashing_balance
)
penalty = penalty_numerator // total_balance * increment
decrease_balance(state, ValidatorIndex(index), penalty)
```

@@ -61,12 +61,13 @@ a payload build process on top of `head_block_hash` and returns an identifier of
initiated process.

```python
def notify_forkchoice_updated(self: ExecutionEngine,
head_block_hash: Hash32,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
...
def notify_forkchoice_updated(
self: ExecutionEngine,
head_block_hash: Hash32,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
payload_attributes: Optional[PayloadAttributes],
) -> Optional[PayloadId]: ...
```

*Note*: The `(head_block_hash, finalized_block_hash)` values of the
@@ -140,8 +141,9 @@ def should_override_forkchoice_update(store: Store, head_root: Root) -> bool:
proposing_on_time = is_proposing_on_time(store)

# Note that this condition is different from `get_proposer_head`
current_time_ok = (head_block.slot == current_slot
or (proposal_slot == current_slot and proposing_on_time))
current_time_ok = head_block.slot == current_slot or (
proposal_slot == current_slot and proposing_on_time
)
single_slot_reorg = parent_slot_ok and current_time_ok

# Check the head weight only if the attestations from the head slot have already been applied.
@@ -154,9 +156,18 @@ def should_override_forkchoice_update(store: Store, head_root: Root) -> bool:
head_weak = True
parent_strong = True

return all([head_late, shuffling_stable, ffg_competitive, finalization_ok,
proposing_reorg_slot, single_slot_reorg,
head_weak, parent_strong])
return all(
[
head_late,
shuffling_stable,
ffg_competitive,
finalization_ok,
proposing_reorg_slot,
single_slot_reorg,
head_weak,
parent_strong,
]
)
```

*Note*: The ordering of conditions is a suggestion only. Implementations are

@@ -75,45 +75,36 @@ state transition.
def upgrade_to_bellatrix(pre: altair.BeaconState) -> BeaconState:
epoch = altair.get_current_epoch(pre)
post = BeaconState(
# Versioning
genesis_time=pre.genesis_time,
genesis_validators_root=pre.genesis_validators_root,
slot=pre.slot,
fork=Fork(
previous_version=pre.fork.current_version,
# [New in Bellatrix]
current_version=BELLATRIX_FORK_VERSION,
epoch=epoch,
),
# History
latest_block_header=pre.latest_block_header,
block_roots=pre.block_roots,
state_roots=pre.state_roots,
historical_roots=pre.historical_roots,
# Eth1
eth1_data=pre.eth1_data,
eth1_data_votes=pre.eth1_data_votes,
eth1_deposit_index=pre.eth1_deposit_index,
# Registry
validators=pre.validators,
balances=pre.balances,
# Randomness
randao_mixes=pre.randao_mixes,
# Slashings
slashings=pre.slashings,
# Participation
previous_epoch_participation=pre.previous_epoch_participation,
current_epoch_participation=pre.current_epoch_participation,
# Finality
justification_bits=pre.justification_bits,
previous_justified_checkpoint=pre.previous_justified_checkpoint,
current_justified_checkpoint=pre.current_justified_checkpoint,
finalized_checkpoint=pre.finalized_checkpoint,
# Inactivity
inactivity_scores=pre.inactivity_scores,
# Sync
current_sync_committee=pre.current_sync_committee,
next_sync_committee=pre.next_sync_committee,
# Execution-layer
# [New in Bellatrix]
latest_execution_payload_header=ExecutionPayloadHeader(),
)


@@ -48,7 +48,9 @@ class GetPayloadResponse(object):
### `get_pow_block_at_terminal_total_difficulty`

```python
def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
def get_pow_block_at_terminal_total_difficulty(
pow_chain: Dict[Hash32, PowBlock],
) -> Optional[PowBlock]:
# `pow_chain` abstractly represents all blocks in the PoW chain
for block in pow_chain.values():
block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
@@ -140,16 +142,20 @@ To obtain an execution payload, a block proposer building a block on top of a
`fee_recipient` field of the execution payload

```python
def prepare_execution_payload(state: BeaconState,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
suggested_fee_recipient: ExecutionAddress,
execution_engine: ExecutionEngine,
pow_chain: Optional[Dict[Hash32, PowBlock]]=None) -> Optional[PayloadId]:
def prepare_execution_payload(
state: BeaconState,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
suggested_fee_recipient: ExecutionAddress,
execution_engine: ExecutionEngine,
pow_chain: Optional[Dict[Hash32, PowBlock]] = None,
) -> Optional[PayloadId]:
if not is_merge_transition_complete(state):
assert pow_chain is not None
is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
is_activation_epoch_reached = (
get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
)
if is_terminal_block_hash_set and not is_activation_epoch_reached:
# Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed
return None
@@ -183,7 +189,9 @@ def prepare_execution_payload(state: BeaconState,
where:

```python
def get_execution_payload(payload_id: Optional[PayloadId], execution_engine: ExecutionEngine) -> ExecutionPayload:
def get_execution_payload(
payload_id: Optional[PayloadId], execution_engine: ExecutionEngine
) -> ExecutionPayload:
if payload_id is None:
# Pre-merge, empty payload
return ExecutionPayload()

@@ -273,7 +273,11 @@ def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) ->
"""
has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE
has_excess_balance = balance > MAX_EFFECTIVE_BALANCE
return has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance
return (
has_eth1_withdrawal_credential(validator)
and has_max_effective_balance
and has_excess_balance
)
```
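
A brief editorial check (assuming a `MAX_EFFECTIVE_BALANCE` of 32 ETH): only the excess over the cap is eligible for a partial withdrawal.

```python
MAX_EFFECTIVE_BALANCE = 32 * 10**9  # Gwei (assumed mainnet value)

balance = 32_500_000_000  # 32.5 ETH
assert balance > MAX_EFFECTIVE_BALANCE
assert balance - MAX_EFFECTIVE_BALANCE == 500_000_000  # 0.5 ETH would be swept
```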

## Beacon chain state transition function
@@ -340,20 +344,24 @@ def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
validator = state.validators[validator_index]
balance = state.balances[validator_index]
if is_fully_withdrawable_validator(validator, balance, epoch):
withdrawals.append(Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=balance,
))
withdrawals.append(
Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=balance,
)
)
withdrawal_index += WithdrawalIndex(1)
elif is_partially_withdrawable_validator(validator, balance):
withdrawals.append(Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=balance - MAX_EFFECTIVE_BALANCE,
))
withdrawals.append(
Withdrawal(
index=withdrawal_index,
validator_index=validator_index,
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
amount=balance - MAX_EFFECTIVE_BALANCE,
)
)
withdrawal_index += WithdrawalIndex(1)
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
break
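# ---------------------------------------------------------------------------
# Editorial sketch (not part of the diff), assuming MAX_EFFECTIVE_BALANCE of
# 32 ETH: a full withdrawal drains the whole balance, a partial one only the
# excess over the cap.
#     full_amount = balance                  # exited validator: whole balance
#     partial_amount = balance - 32 * 10**9  # e.g. 33 ETH -> 1 ETH skimmed
# ---------------------------------------------------------------------------
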
@@ -379,7 +387,9 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
# Update the next validator index to start the next withdrawal sweep
if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
# Next sweep starts after the latest withdrawal's validator index
next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
next_validator_index = ValidatorIndex(
(expected_withdrawals[-1].validator_index + 1) % len(state.validators)
)
state.next_withdrawal_validator_index = next_validator_index
else:
# Advance sweep by the max length of the sweep if there was not a full set of withdrawals
@@ -395,7 +405,9 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
check.

```python
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(
state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
) -> None:
payload = body.execution_payload
# [Modified in Capella] Removed `is_merge_transition_complete` check in Capella
# Verify consistency of the parent hash with respect to the previous execution payload header
@@ -405,7 +417,9 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
# Verify timestamp
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
# Verify the execution payload is valid
assert execution_engine.verify_and_notify_new_payload(NewPayloadRequest(execution_payload=payload))
assert execution_engine.verify_and_notify_new_payload(
NewPayloadRequest(execution_payload=payload)
)
# Cache execution payload header
state.latest_execution_payload_header = ExecutionPayloadHeader(
parent_hash=payload.parent_hash,
@@ -435,7 +449,9 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
```python
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
# Verify that outstanding deposits are processed up to the maximum number of deposits
assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
assert len(body.deposits) == min(
MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index
)

def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
for operation in operations:
@@ -453,8 +469,9 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
#### New `process_bls_to_execution_change`

```python
def process_bls_to_execution_change(state: BeaconState,
signed_address_change: SignedBLSToExecutionChange) -> None:
def process_bls_to_execution_change(
state: BeaconState, signed_address_change: SignedBLSToExecutionChange
) -> None:
address_change = signed_address_change.message

assert address_change.validator_index < len(state.validators)
@@ -465,13 +482,13 @@ def process_bls_to_execution_change(state: BeaconState,
assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:]

# Fork-agnostic domain since address changes are valid across forks
domain = compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE, genesis_validators_root=state.genesis_validators_root)
domain = compute_domain(
DOMAIN_BLS_TO_EXECUTION_CHANGE, genesis_validators_root=state.genesis_validators_root
)
signing_root = compute_signing_root(address_change, domain)
assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature)

validator.withdrawal_credentials = (
ETH1_ADDRESS_WITHDRAWAL_PREFIX
+ b'\x00' * 11
+ address_change.to_execution_address
ETH1_ADDRESS_WITHDRAWAL_PREFIX + b"\x00" * 11 + address_change.to_execution_address
)
```
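
An editorial check of the credential layout above (assuming `ETH1_ADDRESS_WITHDRAWAL_PREFIX = b"\x01"`): prefix byte, 11 zero bytes, then the 20-byte execution address, 32 bytes in total.

```python
ETH1_ADDRESS_WITHDRAWAL_PREFIX = b"\x01"  # assumed mainnet prefix
to_execution_address = b"\xaa" * 20       # placeholder address

credentials = ETH1_ADDRESS_WITHDRAWAL_PREFIX + b"\x00" * 11 + to_execution_address
assert len(credentials) == 32
assert credentials[0] == 0x01 and credentials[12:] == to_execution_address
```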
|
||||
|
||||
@@ -37,12 +37,13 @@ addition of `withdrawals`. Otherwise, `notify_forkchoice_updated` inherits all
|
||||
prior functionality.
|
||||
|
||||
```python
|
||||
def notify_forkchoice_updated(self: ExecutionEngine,
|
||||
head_block_hash: Hash32,
|
||||
safe_block_hash: Hash32,
|
||||
finalized_block_hash: Hash32,
|
||||
payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
|
||||
...
|
||||
def notify_forkchoice_updated(
|
||||
self: ExecutionEngine,
|
||||
head_block_hash: Hash32,
|
||||
safe_block_hash: Hash32,
|
||||
finalized_block_hash: Hash32,
|
||||
payload_attributes: Optional[PayloadAttributes],
|
||||
) -> Optional[PayloadId]: ...
|
||||
```
|
||||
|
||||
## Helpers
|
||||
|
||||
@@ -87,10 +87,10 @@ def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState:
|
||||
base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
|
||||
block_hash=pre.latest_execution_payload_header.block_hash,
|
||||
transactions_root=pre.latest_execution_payload_header.transactions_root,
|
||||
withdrawals_root=Root(), # [New in Capella]
|
||||
# [New in Capella]
|
||||
withdrawals_root=Root(),
|
||||
)
|
||||
post = BeaconState(
|
||||
# Versioning
|
||||
genesis_time=pre.genesis_time,
|
||||
genesis_validators_root=pre.genesis_validators_root,
|
||||
slot=pre.slot,
|
||||
@@ -99,42 +99,33 @@ def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState:
|
||||
current_version=CAPELLA_FORK_VERSION,
|
||||
epoch=epoch,
|
||||
),
|
||||
# History
|
||||
latest_block_header=pre.latest_block_header,
|
||||
block_roots=pre.block_roots,
|
||||
state_roots=pre.state_roots,
|
||||
historical_roots=pre.historical_roots,
|
||||
# Eth1
|
||||
eth1_data=pre.eth1_data,
|
||||
eth1_data_votes=pre.eth1_data_votes,
|
||||
eth1_deposit_index=pre.eth1_deposit_index,
|
||||
# Registry
|
||||
validators=pre.validators,
|
||||
balances=pre.balances,
|
||||
# Randomness
|
||||
randao_mixes=pre.randao_mixes,
|
||||
# Slashings
|
||||
slashings=pre.slashings,
|
||||
# Participation
|
||||
previous_epoch_participation=pre.previous_epoch_participation,
|
||||
current_epoch_participation=pre.current_epoch_participation,
|
||||
# Finality
|
||||
justification_bits=pre.justification_bits,
|
||||
previous_justified_checkpoint=pre.previous_justified_checkpoint,
|
||||
current_justified_checkpoint=pre.current_justified_checkpoint,
|
||||
finalized_checkpoint=pre.finalized_checkpoint,
|
||||
# Inactivity
|
||||
inactivity_scores=pre.inactivity_scores,
|
||||
# Sync
|
||||
current_sync_committee=pre.current_sync_committee,
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
# Execution-layer
|
||||
latest_execution_payload_header=latest_execution_payload_header,
|
||||
# Withdrawals
|
||||
next_withdrawal_index=WithdrawalIndex(0), # [New in Capella]
|
||||
next_withdrawal_validator_index=ValidatorIndex(0), # [New in Capella]
|
||||
# Deep history valid from Capella onwards
|
||||
historical_summaries=List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]([]), # [New in Capella]
|
||||
# [New in Capella]
|
||||
next_withdrawal_index=WithdrawalIndex(0),
|
||||
# [New in Capella]
|
||||
next_withdrawal_validator_index=ValidatorIndex(0),
|
||||
# [New in Capella]
|
||||
historical_summaries=List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]([]),
|
||||
)
|
||||
|
||||
return post
|
||||
|
||||
@@ -52,7 +52,9 @@ def upgrade_lc_update_to_capella(pre: bellatrix.LightClientUpdate) -> LightClien
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_finality_update_to_capella(pre: bellatrix.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
|
||||
def upgrade_lc_finality_update_to_capella(
|
||||
pre: bellatrix.LightClientFinalityUpdate,
|
||||
) -> LightClientFinalityUpdate:
|
||||
return LightClientFinalityUpdate(
|
||||
attested_header=upgrade_lc_header_to_capella(pre.attested_header),
|
||||
finalized_header=upgrade_lc_header_to_capella(pre.finalized_header),
|
||||
@@ -63,7 +65,9 @@ def upgrade_lc_finality_update_to_capella(pre: bellatrix.LightClientFinalityUpda
|
||||
```
|
||||
|
||||
```python
|
||||
def upgrade_lc_optimistic_update_to_capella(pre: bellatrix.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
|
||||
def upgrade_lc_optimistic_update_to_capella(
|
||||
pre: bellatrix.LightClientOptimisticUpdate,
|
||||
) -> LightClientOptimisticUpdate:
|
||||
return LightClientOptimisticUpdate(
|
||||
attested_header=upgrade_lc_header_to_capella(pre.attested_header),
|
||||
sync_aggregate=pre.sync_aggregate,
|
||||
|
||||
@@ -41,7 +41,8 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
|
||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||
)
|
||||
execution_branch = ExecutionBranch(
|
||||
compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX))
|
||||
compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX)
|
||||
)
|
||||
else:
|
||||
# Note that during fork transitions, `finalized_header` may still point to earlier forks.
|
||||
# While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
|
||||
|
||||
@@ -80,11 +80,13 @@ processed through any empty slots up to the assigned slot using
|
||||
`PayloadAttributes`.
|
||||
|
||||
```python
|
||||
def prepare_execution_payload(state: BeaconState,
|
||||
safe_block_hash: Hash32,
|
||||
finalized_block_hash: Hash32,
|
||||
suggested_fee_recipient: ExecutionAddress,
|
||||
execution_engine: ExecutionEngine) -> Optional[PayloadId]:
|
||||
def prepare_execution_payload(
|
||||
state: BeaconState,
|
||||
safe_block_hash: Hash32,
|
||||
finalized_block_hash: Hash32,
|
||||
suggested_fee_recipient: ExecutionAddress,
|
||||
execution_engine: ExecutionEngine,
|
||||
) -> Optional[PayloadId]:
|
||||
# [Modified in Capella] Removed `is_merge_transition_complete` check in Capella
|
||||
parent_hash = state.latest_execution_payload_header.block_hash
|
||||
|
||||
|
||||
@@ -231,9 +231,9 @@ attestation that contributes to justification of the contained chain for
|
||||
EIP-7045.
|
||||
|
||||
```python
|
||||
def get_attestation_participation_flag_indices(state: BeaconState,
|
||||
data: AttestationData,
|
||||
inclusion_delay: uint64) -> Sequence[int]:
|
||||
def get_attestation_participation_flag_indices(
|
||||
state: BeaconState, data: AttestationData, inclusion_delay: uint64
|
||||
) -> Sequence[int]:
|
||||
"""
|
||||
Return the flag indices that are satisfied by an attestation.
|
||||
"""
|
||||
@@ -244,8 +244,12 @@ def get_attestation_participation_flag_indices(state: BeaconState,
|
||||
|
||||
# Matching roots
|
||||
is_matching_source = data.source == justified_checkpoint
|
||||
is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
|
||||
is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
|
||||
is_matching_target = is_matching_source and data.target.root == get_block_root(
|
||||
state, data.target.epoch
|
||||
)
|
||||
is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(
|
||||
state, data.slot
|
||||
)
|
||||
assert is_matching_source
|
||||
|
||||
participation_flag_indices = []
|
||||
@@ -293,9 +297,9 @@ class NewPayloadRequest(object):
`parent_beacon_block_root` parameter for EIP-4788.

```python
def is_valid_block_hash(self: ExecutionEngine,
execution_payload: ExecutionPayload,
parent_beacon_block_root: Root) -> bool:
def is_valid_block_hash(
self: ExecutionEngine, execution_payload: ExecutionPayload, parent_beacon_block_root: Root
) -> bool:
"""
Return ``True`` if and only if ``execution_payload.block_hash`` is computed correctly.
"""
@@ -305,7 +309,9 @@ def is_valid_block_hash(self: ExecutionEngine,
##### `is_valid_versioned_hashes`

```python
def is_valid_versioned_hashes(self: ExecutionEngine, new_payload_request: NewPayloadRequest) -> bool:
def is_valid_versioned_hashes(
self: ExecutionEngine, new_payload_request: NewPayloadRequest
) -> bool:
"""
Return ``True`` if and only if the version hashes computed by the blob transactions of
``new_payload_request.execution_payload`` matches ``new_payload_request.versioned_hashes``.
@@ -319,9 +325,9 @@ def is_valid_versioned_hashes(self: ExecutionEngine, new_payload_request: NewPay
`parent_beacon_block_root` parameter for EIP-4788.

```python
def notify_new_payload(self: ExecutionEngine,
execution_payload: ExecutionPayload,
parent_beacon_block_root: Root) -> bool:
def notify_new_payload(
self: ExecutionEngine, execution_payload: ExecutionPayload, parent_beacon_block_root: Root
) -> bool:
"""
Return ``True`` if and only if ``execution_payload`` is valid with respect to ``self.execution_state``.
"""
@@ -331,15 +337,17 @@ def notify_new_payload(self: ExecutionEngine,
##### Modified `verify_and_notify_new_payload`

```python
def verify_and_notify_new_payload(self: ExecutionEngine,
new_payload_request: NewPayloadRequest) -> bool:
def verify_and_notify_new_payload(
self: ExecutionEngine, new_payload_request: NewPayloadRequest
) -> bool:
"""
Return ``True`` if and only if ``new_payload_request`` is valid with respect to ``self.execution_state``.
"""
execution_payload = new_payload_request.execution_payload
parent_beacon_block_root = new_payload_request.parent_beacon_block_root  # [New in Deneb:EIP4788]
# [New in Deneb:EIP4788]
parent_beacon_block_root = new_payload_request.parent_beacon_block_root

if b'' in execution_payload.transactions:
if b"" in execution_payload.transactions:
return False

# [Modified in Deneb:EIP4788]
@@ -379,7 +387,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
assert len(attestation.aggregation_bits) == len(committee)

# Participation flag indices
participation_flag_indices = get_attestation_participation_flag_indices(state, data, state.slot - data.slot)
participation_flag_indices = get_attestation_participation_flag_indices(
state, data, state.slot - data.slot
)

# Verify signature
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
@@ -393,12 +403,16 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
proposer_reward_numerator = 0
for index in get_attesting_indices(state, attestation):
for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
if flag_index in participation_flag_indices and not has_flag(
epoch_participation[index], flag_index
):
epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
proposer_reward_numerator += get_base_reward(state, index) * weight

# Reward proposer
proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
proposer_reward_denominator = (
(WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
)
proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```
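
For intuition, a worked evaluation of the denominator above using the mainnet Altair incentive weights (the arithmetic is illustrative, not part of the diff):

```python
# With PROPOSER_WEIGHT = 8 and WEIGHT_DENOMINATOR = 64 (mainnet values),
# the proposer receives proposer_reward_numerator / 448.
PROPOSER_WEIGHT = 8
WEIGHT_DENOMINATOR = 64
denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
assert denominator == 448  # (64 - 8) * 64 // 8
```
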
@@ -413,7 +427,9 @@ assign the new fields in `ExecutionPayloadHeader` for EIP-4844. It is also
modified to pass in the parent beacon block root to support EIP-4788.

```python
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(
state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
) -> None:
payload = body.execution_payload

# Verify consistency of the parent hash with respect to the previous execution payload header
@@ -429,7 +445,9 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
# Verify the execution payload is valid
# [Modified in Deneb:EIP4844] Pass `versioned_hashes` to Execution Engine
# [Modified in Deneb:EIP4788] Pass `parent_beacon_block_root` to Execution Engine
versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
versioned_hashes = [
kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments
]
assert execution_engine.verify_and_notify_new_payload(
NewPayloadRequest(
execution_payload=payload,
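
For reference, the `kzg_commitment_to_versioned_hash` helper used above is, per the EIP-4844 construction, the SHA-256 of the commitment with its first byte replaced by the KZG version tag; a minimal sketch:

```python
# Minimal sketch of the EIP-4844 versioned hash (version tag 0x01).
from hashlib import sha256

VERSIONED_HASH_VERSION_KZG = b"\x01"

def kzg_commitment_to_versioned_hash(commitment: bytes) -> bytes:
    return VERSIONED_HASH_VERSION_KZG + sha256(commitment).digest()[1:]
```
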
@@ -479,7 +497,9 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu
assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
# Verify signature
# [Modified in Deneb:EIP7044]
domain = compute_domain(DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root)
domain = compute_domain(
DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root
)
signing_root = compute_signing_root(voluntary_exit, domain)
assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
# Initiate exit
@@ -508,14 +528,18 @@ def process_registry_updates(state: BeaconState) -> None:
initiate_validator_exit(state, ValidatorIndex(index))

# Queue validators eligible for activation and not yet dequeued for activation
activation_queue = sorted([
index for index, validator in enumerate(state.validators)
if is_eligible_for_activation(state, validator)
activation_queue = sorted(
[
index
for index, validator in enumerate(state.validators)
if is_eligible_for_activation(state, validator)
],
# Order by the sequence of activation_eligibility_epoch setting and then index
], key=lambda index: (state.validators[index].activation_eligibility_epoch, index))
key=lambda index: (state.validators[index].activation_eligibility_epoch, index),
)
# Dequeued validators for activation up to activation churn limit
# [Modified in Deneb:EIP7514]
for index in activation_queue[:get_validator_activation_churn_limit(state)]:
for index in activation_queue[: get_validator_activation_churn_limit(state)]:
validator = state.validators[index]
validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
```

@@ -53,7 +53,9 @@ valid blobs) received on the p2p network MUST NOT invalidate a block that is
otherwise valid and available.

```python
def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
def is_data_available(
beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]
) -> bool:
# `retrieve_blobs_and_proofs` is implementation and context dependent
# It returns all the blobs for the given block root, and raises an exception if not available
# Note: the p2p network does not guarantee sidecar retrieval outside of

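The body elided by the hunk is essentially a fetch-then-verify step; a minimal sketch, with the implementation-defined helpers passed in as parameters since the spec leaves them to the client:

```python
# Sketch only: `retrieve_blobs_and_proofs` is implementation-defined and
# raises when sidecars are unavailable; `verify_blob_kzg_proof_batch` is the
# KZG function from the polynomial commitments document.
def is_data_available_sketch(beacon_block_root, blob_kzg_commitments,
                             retrieve_blobs_and_proofs,
                             verify_blob_kzg_proof_batch) -> bool:
    blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root)
    return verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, proofs)
```
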
@@ -78,54 +78,45 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
block_hash=pre.latest_execution_payload_header.block_hash,
transactions_root=pre.latest_execution_payload_header.transactions_root,
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
blob_gas_used=uint64(0),  # [New in Deneb:EIP4844]
excess_blob_gas=uint64(0),  # [New in Deneb:EIP4844]
# [New in Deneb:EIP4844]
blob_gas_used=uint64(0),
# [New in Deneb:EIP4844]
excess_blob_gas=uint64(0),
)
post = BeaconState(
# Versioning
genesis_time=pre.genesis_time,
genesis_validators_root=pre.genesis_validators_root,
slot=pre.slot,
fork=Fork(
previous_version=pre.fork.current_version,
current_version=DENEB_FORK_VERSION,  # [Modified in Deneb]
# [Modified in Deneb]
current_version=DENEB_FORK_VERSION,
epoch=epoch,
),
# History
latest_block_header=pre.latest_block_header,
block_roots=pre.block_roots,
state_roots=pre.state_roots,
historical_roots=pre.historical_roots,
# Eth1
eth1_data=pre.eth1_data,
eth1_data_votes=pre.eth1_data_votes,
eth1_deposit_index=pre.eth1_deposit_index,
# Registry
validators=pre.validators,
balances=pre.balances,
# Randomness
randao_mixes=pre.randao_mixes,
# Slashings
slashings=pre.slashings,
# Participation
previous_epoch_participation=pre.previous_epoch_participation,
current_epoch_participation=pre.current_epoch_participation,
# Finality
justification_bits=pre.justification_bits,
previous_justified_checkpoint=pre.previous_justified_checkpoint,
current_justified_checkpoint=pre.current_justified_checkpoint,
finalized_checkpoint=pre.finalized_checkpoint,
# Inactivity
inactivity_scores=pre.inactivity_scores,
# Sync
current_sync_committee=pre.current_sync_committee,
next_sync_committee=pre.next_sync_committee,
# Execution-layer
latest_execution_payload_header=latest_execution_payload_header,  # [Modified in Deneb:EIP4844]
# Withdrawals
# [Modified in Deneb:EIP4844]
latest_execution_payload_header=latest_execution_payload_header,
next_withdrawal_index=pre.next_withdrawal_index,
next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
# Deep history valid from Capella onwards
historical_summaries=pre.historical_summaries,
)


@@ -72,7 +72,9 @@ def upgrade_lc_update_to_deneb(pre: capella.LightClientUpdate) -> LightClientUpd
```

```python
def upgrade_lc_finality_update_to_deneb(pre: capella.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
def upgrade_lc_finality_update_to_deneb(
pre: capella.LightClientFinalityUpdate,
) -> LightClientFinalityUpdate:
return LightClientFinalityUpdate(
attested_header=upgrade_lc_header_to_deneb(pre.attested_header),
finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header),
@@ -83,7 +85,9 @@ def upgrade_lc_finality_update_to_deneb(pre: capella.LightClientFinalityUpdate)
```

```python
def upgrade_lc_optimistic_update_to_deneb(pre: capella.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
def upgrade_lc_optimistic_update_to_deneb(
pre: capella.LightClientOptimisticUpdate,
) -> LightClientOptimisticUpdate:
return LightClientOptimisticUpdate(
attested_header=upgrade_lc_header_to_deneb(pre.attested_header),
sync_aggregate=pre.sync_aggregate,

@@ -46,7 +46,8 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
execution_header.excess_blob_gas = payload.excess_blob_gas

execution_branch = ExecutionBranch(
compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX))
compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX)
)
else:
# Note that during fork transitions, `finalized_header` may still point to earlier forks.
# While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),

@@ -66,7 +66,9 @@ def is_valid_light_client_header(header: LightClientHeader) -> bool:

# [New in Deneb:EIP4844]
if epoch < DENEB_FORK_EPOCH:
if header.execution.blob_gas_used != uint64(0) or header.execution.excess_blob_gas != uint64(0):
if header.execution.blob_gas_used != uint64(0):
return False
if header.execution.excess_blob_gas != uint64(0):
return False

if epoch < CAPELLA_FORK_EPOCH:

@@ -100,7 +100,9 @@ class BlobIdentifier(Container):

```python
def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool:
gindex = get_subtree_index(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments', blob_sidecar.index))
gindex = get_subtree_index(
get_generalized_index(BeaconBlockBody, "blob_kzg_commitments", blob_sidecar.index)
)
return is_valid_merkle_branch(
leaf=blob_sidecar.kzg_commitment.hash_tree_root(),
branch=blob_sidecar.kzg_commitment_inclusion_proof,

@@ -135,7 +135,7 @@ def reverse_bits(n: int, order: int) -> int:
"""
assert is_power_of_two(order)
# Convert n to binary with the same number of bits as "order" - 1, then reverse its bit order
return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2)
return int(("{:0" + str(order.bit_length() - 1) + "b}").format(n)[::-1], 2)
```

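As a quick sanity check of the one-liner above (illustrative values, not part of the diff): with `order = 8` the width is `order.bit_length() - 1 = 3` bits, so 3 = 0b011 reverses to 0b110 = 6.

```python
# Worked examples for reverse_bits with order = 8 (3-bit width).
assert int("{:03b}".format(3)[::-1], 2) == 6  # 011 -> 110
assert int("{:03b}".format(1)[::-1], 2) == 4  # 001 -> 100
assert int("{:03b}".format(6)[::-1], 2) == 3  # 110 -> 011
```
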
#### `bit_reversal_permutation`
@@ -158,9 +158,7 @@ This function performs a multi-scalar multiplication between `points` and
`integers`. `points` can either be in G1 or G2.

```python
def multi_exp(_points: Sequence[TPoint],
_integers: Sequence[uint64]) -> Sequence[TPoint]:
...
def multi_exp(_points: Sequence[TPoint], _integers: Sequence[uint64]) -> Sequence[TPoint]: ...
```

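Since the stub above leaves the implementation to the BLS backend, here is a minimal double-and-add sketch of the same operation over an abstract additive group (the `add`/`identity` parameters are stand-ins, not spec names):

```python
from typing import Callable, Sequence, TypeVar

T = TypeVar("T")

def naive_multi_exp(
    points: Sequence[T],
    scalars: Sequence[int],
    add: Callable[[T, T], T],
    identity: T,
) -> T:
    # result = sum_i scalars[i] * points[i]; scalar multiplication by double-and-add
    result = identity
    for point, scalar in zip(points, scalars):
        acc, base = identity, point
        while scalar > 0:
            if scalar & 1:
                acc = add(acc, base)
            base = add(base, base)  # point doubling
            scalar >>= 1
        result = add(result, acc)
    return result

# Usage over plain integers (group = (Z, +)): 2*3 + 5*4 == 26
assert naive_multi_exp([3, 4], [2, 5], lambda a, b: a + b, 0) == 26
```
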
#### `hash_to_bls_field`
@@ -239,7 +237,9 @@ def blob_to_polynomial(blob: Blob) -> Polynomial:
"""
polynomial = Polynomial()
for i in range(FIELD_ELEMENTS_PER_BLOB):
value = bytes_to_bls_field(blob[i * BYTES_PER_FIELD_ELEMENT: (i + 1) * BYTES_PER_FIELD_ELEMENT])
value = bytes_to_bls_field(
blob[i * BYTES_PER_FIELD_ELEMENT : (i + 1) * BYTES_PER_FIELD_ELEMENT]
)
polynomial[i] = value
return polynomial
```
@@ -266,7 +266,9 @@ def compute_challenge(blob: Blob, commitment: KZGCommitment) -> BLSFieldElement:
#### `g1_lincomb`

```python
def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElement]) -> KZGCommitment:
def g1_lincomb(
points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElement]
) -> KZGCommitment:
"""
BLS multiscalar multiplication in G1. This can be naively implemented using double-and-add.
"""
@@ -306,7 +308,9 @@ def compute_roots_of_unity(order: uint64) -> Sequence[BLSFieldElement]:
Return roots of unity of ``order``.
"""
assert (BLS_MODULUS - 1) % int(order) == 0
root_of_unity = BLSFieldElement(pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // int(order), BLS_MODULUS))
root_of_unity = BLSFieldElement(
pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // int(order), BLS_MODULUS)
)
return compute_powers(root_of_unity, order)
```

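A quick numerical sanity check of this construction (constants are the BLS12-381 scalar field modulus and the spec's `PRIMITIVE_ROOT_OF_UNITY = 7`; the check itself is mine):

```python
# omega = 7^((r-1)/order) must have multiplicative order exactly `order`.
BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513
PRIMITIVE_ROOT_OF_UNITY = 7
order = 4096  # = FIELD_ELEMENTS_PER_BLOB
omega = pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // order, BLS_MODULUS)
assert pow(omega, order, BLS_MODULUS) == 1
assert pow(omega, order // 2, BLS_MODULUS) != 1  # order is exact, since 7 is a primitive root
```
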
@@ -315,7 +319,9 @@ def compute_roots_of_unity(order: uint64) -> Sequence[BLSFieldElement]:
#### `evaluate_polynomial_in_evaluation_form`

```python
def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial, z: BLSFieldElement) -> BLSFieldElement:
def evaluate_polynomial_in_evaluation_form(
polynomial: Polynomial, z: BLSFieldElement
) -> BLSFieldElement:
"""
Evaluate a polynomial (in evaluation form) at an arbitrary point ``z``.
- When ``z`` is in the domain, the evaluation can be found by indexing the polynomial at the
@@ -362,10 +368,9 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
#### `verify_kzg_proof`

```python
def verify_kzg_proof(commitment_bytes: Bytes48,
z_bytes: Bytes32,
y_bytes: Bytes32,
proof_bytes: Bytes48) -> bool:
def verify_kzg_proof(
commitment_bytes: Bytes48, z_bytes: Bytes32, y_bytes: Bytes32, proof_bytes: Bytes48
) -> bool:
"""
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
Receives inputs as bytes.
@@ -376,19 +381,20 @@ def verify_kzg_proof(commitment_bytes: Bytes48,
assert len(y_bytes) == BYTES_PER_FIELD_ELEMENT
assert len(proof_bytes) == BYTES_PER_PROOF

return verify_kzg_proof_impl(bytes_to_kzg_commitment(commitment_bytes),
bytes_to_bls_field(z_bytes),
bytes_to_bls_field(y_bytes),
bytes_to_kzg_proof(proof_bytes))
return verify_kzg_proof_impl(
bytes_to_kzg_commitment(commitment_bytes),
bytes_to_bls_field(z_bytes),
bytes_to_bls_field(y_bytes),
bytes_to_kzg_proof(proof_bytes),
)
```

#### `verify_kzg_proof_impl`

```python
def verify_kzg_proof_impl(commitment: KZGCommitment,
z: BLSFieldElement,
y: BLSFieldElement,
proof: KZGProof) -> bool:
def verify_kzg_proof_impl(
commitment: KZGCommitment, z: BLSFieldElement, y: BLSFieldElement, proof: KZGProof
) -> bool:
"""
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
"""
@@ -398,19 +404,20 @@ def verify_kzg_proof_impl(commitment: KZGCommitment,
bls.multiply(bls.G2(), -z),
)
P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), -y))
return bls.pairing_check([
[P_minus_y, bls.neg(bls.G2())],
[bls.bytes48_to_G1(proof), X_minus_z]
])
return bls.pairing_check(
[[P_minus_y, bls.neg(bls.G2())], [bls.bytes48_to_G1(proof), X_minus_z]]
)
```

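For reference, the pairing check above encodes the usual KZG verification identity (my gloss, not part of the diff): with commitment `C = [p(s)]_1`, proof `π = [q(s)]_1` and trusted-setup secret `s`,

```
e(C - [y]_1, -G_2) \cdot e(\pi, [s - z]_2) = 1
\iff e(C - [y]_1, G_2) = e(\pi, [s - z]_2)
\iff p(s) - y = q(s) \, (s - z)
```

which is exactly the statement that `q(X) = (p(X) - y) / (X - z)` is a polynomial, i.e. that `p(z) = y`.
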
#### `verify_kzg_proof_batch`

```python
def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],
zs: Sequence[BLSFieldElement],
ys: Sequence[BLSFieldElement],
proofs: Sequence[KZGProof]) -> bool:
def verify_kzg_proof_batch(
commitments: Sequence[KZGCommitment],
zs: Sequence[BLSFieldElement],
ys: Sequence[BLSFieldElement],
proofs: Sequence[KZGProof],
) -> bool:
"""
Verify multiple KZG proofs efficiently.
"""
@@ -434,15 +441,25 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],
# e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1])
proof_lincomb = g1_lincomb(proofs, r_powers)
proof_z_lincomb = g1_lincomb(proofs, [z * r_power for z, r_power in zip(zs, r_powers)])
C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), -y))
for commitment, y in zip(commitments, ys)]
C_minus_ys = [
bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), -y))
for commitment, y in zip(commitments, ys)
]
C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys]
C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers)

return bls.pairing_check([
[bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[1]))],
[bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2()]
])
return bls.pairing_check(
[
[
bls.bytes48_to_G1(proof_lincomb),
bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[1])),
],
[
bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)),
bls.G2(),
],
]
)
```

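Similarly, the batched check folds all `n` individual identities into one pairing equation using powers of a random scalar `r` (again my gloss, not part of the diff):

```
e\Big(\sum_i r^i \pi_i,\; -[s]_2\Big) \cdot
e\Big(\sum_i r^i (C_i - [y_i]_1) + \sum_i r^i z_i \pi_i,\; G_2\Big) = 1
```

which, for a random `r`, holds only if every individual proof verifies, except with probability on the order of `n` divided by the field size.
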
#### `compute_kzg_proof`
@@ -464,9 +481,9 @@ def compute_kzg_proof(blob: Blob, z_bytes: Bytes32) -> Tuple[KZGProof, Bytes32]:
#### `compute_quotient_eval_within_domain`

```python
def compute_quotient_eval_within_domain(z: BLSFieldElement,
polynomial: Polynomial,
y: BLSFieldElement) -> BLSFieldElement:
def compute_quotient_eval_within_domain(
z: BLSFieldElement, polynomial: Polynomial, y: BLSFieldElement
) -> BLSFieldElement:
"""
Given `y == p(z)` for a polynomial `p(x)`, compute `q(z)`: the KZG quotient polynomial evaluated at `z` for the
special case where `z` is in roots of unity.
@@ -491,7 +508,9 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement,
#### `compute_kzg_proof_impl`

```python
def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> Tuple[KZGProof, BLSFieldElement]:
def compute_kzg_proof_impl(
polynomial: Polynomial, z: BLSFieldElement
) -> Tuple[KZGProof, BLSFieldElement]:
"""
Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`.
"""
@@ -509,12 +528,16 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> Tuple[
for i, (a, b) in enumerate(zip(polynomial_shifted, denominator_poly)):
if b == BLSFieldElement(0):
# The denominator is zero hence `z` is a root of unity: we must handle it as a special case
quotient_polynomial[i] = compute_quotient_eval_within_domain(roots_of_unity_brp[i], polynomial, y)
quotient_polynomial[i] = compute_quotient_eval_within_domain(
roots_of_unity_brp[i], polynomial, y
)
else:
# Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z).
quotient_polynomial[i] = a / b

return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_G1_LAGRANGE), quotient_polynomial)), y
return KZGProof(
g1_lincomb(bit_reversal_permutation(KZG_SETUP_G1_LAGRANGE), quotient_polynomial)
), y
```

#### `compute_blob_kzg_proof`
@@ -538,9 +561,7 @@ def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof:
#### `verify_blob_kzg_proof`

```python
def verify_blob_kzg_proof(blob: Blob,
commitment_bytes: Bytes48,
proof_bytes: Bytes48) -> bool:
def verify_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48, proof_bytes: Bytes48) -> bool:
"""
Given a blob and a KZG proof, verify that the blob data corresponds to the provided commitment.

@@ -566,9 +587,9 @@ def verify_blob_kzg_proof(blob: Blob,
#### `verify_blob_kzg_proof_batch`

```python
def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
commitments_bytes: Sequence[Bytes48],
proofs_bytes: Sequence[Bytes48]) -> bool:
def verify_blob_kzg_proof_batch(
blobs: Sequence[Blob], commitments_bytes: Sequence[Bytes48], proofs_bytes: Sequence[Bytes48]
) -> bool:
"""
Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments.
Will return True if there are zero blobs/commitments/proofs.

@@ -115,11 +115,13 @@ processed through any empty slots up to the assigned slot using
beacon block root as an additional parameter to the `PayloadAttributes`.

```python
def prepare_execution_payload(state: BeaconState,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
suggested_fee_recipient: ExecutionAddress,
execution_engine: ExecutionEngine) -> Optional[PayloadId]:
def prepare_execution_payload(
state: BeaconState,
safe_block_hash: Hash32,
finalized_block_hash: Hash32,
suggested_fee_recipient: ExecutionAddress,
execution_engine: ExecutionEngine,
) -> Optional[PayloadId]:
# Verify consistency of the parent hash with respect to the previous execution payload header
parent_hash = state.latest_execution_payload_header.block_hash

@@ -129,7 +131,9 @@ def prepare_execution_payload(state: BeaconState,
prev_randao=get_randao_mix(state, get_current_epoch(state)),
suggested_fee_recipient=suggested_fee_recipient,
withdrawals=get_expected_withdrawals(state),
parent_beacon_block_root=hash_tree_root(state.latest_block_header),  # [New in Deneb:EIP4788]
parent_beacon_block_root=hash_tree_root(
state.latest_block_header
),  # [New in Deneb:EIP4788]
)
return execution_engine.notify_forkchoice_updated(
head_block_hash=parent_hash,
@@ -163,9 +167,9 @@ to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic.
Each `sidecar` is obtained from:

```python
def get_blob_sidecars(signed_block: SignedBeaconBlock,
blobs: Sequence[Blob],
blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]:
def get_blob_sidecars(
signed_block: SignedBeaconBlock, blobs: Sequence[Blob], blob_kzg_proofs: Sequence[KZGProof]
) -> Sequence[BlobSidecar]:
block = signed_block.message
signed_block_header = compute_signed_block_header(signed_block)
return [
@@ -177,7 +181,7 @@ def get_blob_sidecars(signed_block: SignedBeaconBlock,
signed_block_header=signed_block_header,
kzg_commitment_inclusion_proof=compute_merkle_proof(
block.body,
get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments', index),
get_generalized_index(BeaconBlockBody, "blob_kzg_commitments", index),
),
)
for index, blob in enumerate(blobs)

@@ -431,7 +431,9 @@ class BeaconState(Container):
8-bit random byte in the effective balance filter.

```python
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
def compute_proposer_index(
state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32
) -> ValidatorIndex:
"""
Return from ``indices`` a random index sampled by effective balance.
"""
@@ -444,7 +446,7 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
# [Modified in Electra]
random_bytes = hash(seed + uint_to_bytes(i // 16))
offset = i % 16 * 2
random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
effective_balance = state.validators[candidate_index].effective_balance
# [Modified in Electra:EIP7251]
if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
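
The sampling loop above draws one 16-bit value per candidate; a sketch of just that step, assuming the spec's `hash` is SHA-256 and `bytes_to_uint64`/`uint_to_bytes` use the SSZ little-endian convention:

```python
from hashlib import sha256

def sample_random_value(seed: bytes, i: int) -> int:
    # One 32-byte hash yields sixteen 2-byte windows, so a fresh
    # hash is only needed once every 16 candidates.
    random_bytes = sha256(seed + (i // 16).to_bytes(8, "little")).digest()
    offset = i % 16 * 2
    return int.from_bytes(random_bytes[offset : offset + 2], "little")

assert 0 <= sample_random_value(b"\x00" * 32, 5) < 2**16
```
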
@@ -464,7 +466,8 @@ def is_eligible_for_activation_queue(validator: Validator) -> bool:
"""
return (
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
and validator.effective_balance >= MIN_ACTIVATION_BALANCE  # [Modified in Electra:EIP7251]
# [Modified in Electra:EIP7251]
and validator.effective_balance >= MIN_ACTIVATION_BALANCE
)
```

@@ -492,7 +495,10 @@ def has_execution_withdrawal_credential(validator: Validator) -> bool:
"""
Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential.
"""
return has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator)
return (
has_eth1_withdrawal_credential(validator)  # 0x01
or has_compounding_withdrawal_credential(validator)  # 0x02
)
```

#### Modified `is_fully_withdrawable_validator`
@@ -507,7 +513,8 @@ def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch:
Check if ``validator`` is fully withdrawable.
"""
return (
has_execution_withdrawal_credential(validator)  # [Modified in Electra:EIP7251]
# [Modified in Electra:EIP7251]
has_execution_withdrawal_credential(validator)
and validator.withdrawable_epoch <= epoch
and balance > 0
)
@@ -526,10 +533,13 @@ def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) ->
Check if ``validator`` is partially withdrawable.
"""
max_effective_balance = get_max_effective_balance(validator)
has_max_effective_balance = validator.effective_balance == max_effective_balance  # [Modified in Electra:EIP7251]
has_excess_balance = balance > max_effective_balance  # [Modified in Electra:EIP7251]
# [Modified in Electra:EIP7251]
has_max_effective_balance = validator.effective_balance == max_effective_balance
# [Modified in Electra:EIP7251]
has_excess_balance = balance > max_effective_balance
return (
has_execution_withdrawal_credential(validator)  # [Modified in Electra:EIP7251]
# [Modified in Electra:EIP7251]
has_execution_withdrawal_credential(validator)
and has_max_effective_balance
and has_excess_balance
)
@@ -567,8 +577,7 @@ def get_balance_churn_limit(state: BeaconState) -> Gwei:
Return the churn limit for the current epoch.
"""
churn = max(
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA,
get_total_active_balance(state) // CHURN_LIMIT_QUOTIENT
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA, get_total_active_balance(state) // CHURN_LIMIT_QUOTIENT
)
return churn - churn % EFFECTIVE_BALANCE_INCREMENT
```
@@ -595,7 +604,8 @@ def get_consolidation_churn_limit(state: BeaconState) -> Gwei:
```python
def get_pending_balance_to_withdraw(state: BeaconState, validator_index: ValidatorIndex) -> Gwei:
return sum(
withdrawal.amount for withdrawal in state.pending_partial_withdrawals
withdrawal.amount
for withdrawal in state.pending_partial_withdrawals
if withdrawal.validator_index == validator_index
)
```
@@ -615,7 +625,8 @@ def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[V
for committee_index in committee_indices:
committee = get_beacon_committee(state, attestation.data.slot, committee_index)
committee_attesters = set(
attester_index for i, attester_index in enumerate(committee)
attester_index
for i, attester_index in enumerate(committee)
if attestation.aggregation_bits[committee_offset + i]
)
output = output.union(committee_attesters)
@@ -645,12 +656,14 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd
i = uint64(0)
sync_committee_indices: List[ValidatorIndex] = []
while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
shuffled_index = compute_shuffled_index(
uint64(i % active_validator_count), active_validator_count, seed
)
candidate_index = active_validator_indices[shuffled_index]
# [Modified in Electra]
random_bytes = hash(seed + uint_to_bytes(i // 16))
offset = i % 16 * 2
random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
effective_balance = state.validators[candidate_index].effective_balance
# [Modified in Electra:EIP7251]
if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
@@ -689,7 +702,9 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
```python
def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
validator = state.validators[index]
validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
validator.withdrawal_credentials = (
COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
)
queue_excess_active_balance(state, index)
```

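The credential rewrite above only touches the first byte; a tiny illustration with a hypothetical credential (prefixes per the spec: 0x01 eth1, 0x02 compounding):

```python
# Only byte 0 changes; the 11 zero bytes and the 20-byte address are kept.
COMPOUNDING_WITHDRAWAL_PREFIX = b"\x02"
old = b"\x01" + b"\x00" * 11 + b"\xaa" * 20  # hypothetical 0x01 credential
new = COMPOUNDING_WITHDRAWAL_PREFIX + old[1:]
assert len(new) == 32 and new[0] == 2 and new[1:] == old[1:]
```
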
@@ -704,20 +719,24 @@ def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> No
|
||||
validator = state.validators[index]
|
||||
# Use bls.G2_POINT_AT_INFINITY as a signature field placeholder
|
||||
# and GENESIS_SLOT to distinguish from a pending deposit request
|
||||
state.pending_deposits.append(PendingDeposit(
|
||||
pubkey=validator.pubkey,
|
||||
withdrawal_credentials=validator.withdrawal_credentials,
|
||||
amount=excess_balance,
|
||||
signature=bls.G2_POINT_AT_INFINITY,
|
||||
slot=GENESIS_SLOT,
|
||||
))
|
||||
state.pending_deposits.append(
|
||||
PendingDeposit(
|
||||
pubkey=validator.pubkey,
|
||||
withdrawal_credentials=validator.withdrawal_credentials,
|
||||
amount=excess_balance,
|
||||
signature=bls.G2_POINT_AT_INFINITY,
|
||||
slot=GENESIS_SLOT,
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
#### New `compute_exit_epoch_and_update_churn`
|
||||
|
||||
```python
|
||||
def compute_exit_epoch_and_update_churn(state: BeaconState, exit_balance: Gwei) -> Epoch:
|
||||
earliest_exit_epoch = max(state.earliest_exit_epoch, compute_activation_exit_epoch(get_current_epoch(state)))
|
||||
earliest_exit_epoch = max(
|
||||
state.earliest_exit_epoch, compute_activation_exit_epoch(get_current_epoch(state))
|
||||
)
|
||||
per_epoch_churn = get_activation_exit_churn_limit(state)
|
||||
# New epoch for exits.
|
||||
if state.earliest_exit_epoch < earliest_exit_epoch:
|
||||
@@ -742,9 +761,12 @@ def compute_exit_epoch_and_update_churn(state: BeaconState, exit_balance: Gwei)
|
||||
#### New `compute_consolidation_epoch_and_update_churn`
|
||||
|
||||
```python
|
||||
def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidation_balance: Gwei) -> Epoch:
|
||||
def compute_consolidation_epoch_and_update_churn(
|
||||
state: BeaconState, consolidation_balance: Gwei
|
||||
) -> Epoch:
|
||||
earliest_consolidation_epoch = max(
|
||||
state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state)))
|
||||
state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state))
|
||||
)
|
||||
per_epoch_consolidation_churn = get_consolidation_churn_limit(state)
|
||||
# New epoch for consolidations.
|
||||
if state.earliest_consolidation_epoch < earliest_consolidation_epoch:
|
||||
@@ -760,7 +782,9 @@ def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidati
|
||||
consolidation_balance_to_consume += additional_epochs * per_epoch_consolidation_churn
|
||||
|
||||
# Consume the balance and update state variables.
|
||||
state.consolidation_balance_to_consume = consolidation_balance_to_consume - consolidation_balance
|
||||
state.consolidation_balance_to_consume = (
|
||||
consolidation_balance_to_consume - consolidation_balance
|
||||
)
|
||||
state.earliest_consolidation_epoch = earliest_consolidation_epoch
|
||||
|
||||
return state.earliest_consolidation_epoch
|
||||
@@ -773,9 +797,9 @@ penalty and proposer/whistleblower rewards are calculated in accordance with
|
||||
EIP7251.
|
||||
|
||||
```python
|
||||
def slash_validator(state: BeaconState,
|
||||
slashed_index: ValidatorIndex,
|
||||
whistleblower_index: ValidatorIndex=None) -> None:
|
||||
def slash_validator(
|
||||
state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
|
||||
) -> None:
|
||||
"""
|
||||
Slash the validator with index ``slashed_index``.
|
||||
"""
|
||||
@@ -783,7 +807,9 @@ def slash_validator(state: BeaconState,
|
||||
initiate_validator_exit(state, slashed_index)
|
||||
validator = state.validators[slashed_index]
|
||||
validator.slashed = True
|
||||
validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
|
||||
validator.withdrawable_epoch = max(
|
||||
validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR)
|
||||
)
|
||||
state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
|
||||
# [Modified in Electra:EIP7251]
|
||||
slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA
|
||||
@@ -793,8 +819,10 @@ def slash_validator(state: BeaconState,
|
||||
proposer_index = get_beacon_proposer_index(state)
|
||||
if whistleblower_index is None:
|
||||
whistleblower_index = proposer_index
|
||||
# [Modified in Electra:EIP7251]
|
||||
whistleblower_reward = Gwei(
|
||||
validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA) # [Modified in Electra:EIP7251]
|
||||
validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA
|
||||
)
|
||||
proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR)
|
||||
increase_balance(state, proposer_index, proposer_reward)
|
||||
increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
|
||||
@@ -815,12 +843,17 @@ def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state)
|
||||
process_inactivity_updates(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state) # [Modified in Electra:EIP7251]
|
||||
process_slashings(state) # [Modified in Electra:EIP7251]
|
||||
# [Modified in Electra:EIP7251]
|
||||
process_registry_updates(state)
|
||||
# [Modified in Electra:EIP7251]
|
||||
process_slashings(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_pending_deposits(state) # [New in Electra:EIP7251]
|
||||
process_pending_consolidations(state) # [New in Electra:EIP7251]
|
||||
process_effective_balance_updates(state) # [Modified in Electra:EIP7251]
|
||||
# [New in Electra:EIP7251]
|
||||
process_pending_deposits(state)
|
||||
# [New in Electra:EIP7251]
|
||||
process_pending_consolidations(state)
|
||||
# [Modified in Electra:EIP7251]
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_summaries_update(state)
|
||||
@@ -845,7 +878,10 @@ def process_registry_updates(state: BeaconState) -> None:
|
||||
for index, validator in enumerate(state.validators):
|
||||
if is_eligible_for_activation_queue(validator): # [Modified in Electra:EIP7251]
|
||||
validator.activation_eligibility_epoch = current_epoch + 1
|
||||
elif is_active_validator(validator, current_epoch) and validator.effective_balance <= EJECTION_BALANCE:
|
||||
elif (
|
||||
is_active_validator(validator, current_epoch)
|
||||
and validator.effective_balance <= EJECTION_BALANCE
|
||||
):
|
||||
initiate_validator_exit(state, ValidatorIndex(index)) # [Modified in Electra:EIP7251]
|
||||
elif is_eligible_for_activation(state, validator):
|
||||
validator.activation_epoch = activation_epoch
|
||||
@@ -861,13 +897,19 @@ def process_slashings(state: BeaconState) -> None:
|
||||
epoch = get_current_epoch(state)
|
||||
total_balance = get_total_active_balance(state)
|
||||
adjusted_total_slashing_balance = min(
|
||||
sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX,
|
||||
total_balance
|
||||
sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX, total_balance
|
||||
)
|
||||
increment = (
|
||||
EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
|
||||
)
|
||||
penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (
|
||||
total_balance // increment
|
||||
)
|
||||
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
|
||||
penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (total_balance // increment)
|
||||
for index, validator in enumerate(state.validators):
|
||||
if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
if (
|
||||
validator.slashed
|
||||
and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch
|
||||
):
|
||||
effective_balance_increments = validator.effective_balance // increment
|
||||
# [Modified in Electra:EIP7251]
|
||||
penalty = penalty_per_effective_balance_increment * effective_balance_increments
|
||||
@@ -885,12 +927,11 @@ def apply_pending_deposit(state: BeaconState, deposit: PendingDeposit) -> None:
|
||||
if deposit.pubkey not in validator_pubkeys:
|
||||
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
|
||||
if is_valid_deposit_signature(
|
||||
deposit.pubkey,
|
||||
deposit.withdrawal_credentials,
|
||||
deposit.amount,
|
||||
deposit.signature
|
||||
deposit.pubkey, deposit.withdrawal_credentials, deposit.amount, deposit.signature
|
||||
):
|
||||
add_validator_to_registry(state, deposit.pubkey, deposit.withdrawal_credentials, deposit.amount)
|
||||
add_validator_to_registry(
|
||||
state, deposit.pubkey, deposit.withdrawal_credentials, deposit.amount
|
||||
)
|
||||
else:
|
||||
validator_index = ValidatorIndex(validator_pubkeys.index(deposit.pubkey))
|
||||
increase_balance(state, validator_index, deposit.amount)
|
||||
@@ -910,7 +951,9 @@ before applying pending deposit:
|
||||
```python
|
||||
def process_pending_deposits(state: BeaconState) -> None:
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
||||
available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(
|
||||
state
|
||||
)
|
||||
processed_amount = 0
|
||||
next_deposit_index = 0
|
||||
deposits_to_postpone = []
|
||||
@@ -921,7 +964,8 @@ def process_pending_deposits(state: BeaconState) -> None:
|
||||
# Do not process deposit requests if Eth1 bridge deposits are not yet applied.
|
||||
if (
|
||||
# Is deposit request
|
||||
deposit.slot > GENESIS_SLOT and
|
||||
deposit.slot > GENESIS_SLOT
|
||||
and
|
||||
# There are pending Eth1 bridge deposits
|
||||
state.eth1_deposit_index < state.deposit_requests_start_index
|
||||
):
|
||||
@@ -988,7 +1032,8 @@ def process_pending_consolidations(state: BeaconState) -> None:
|
||||
|
||||
# Calculate the consolidated balance
|
||||
source_effective_balance = min(
|
||||
state.balances[pending_consolidation.source_index], source_validator.effective_balance)
|
||||
state.balances[pending_consolidation.source_index], source_validator.effective_balance
|
||||
)
|
||||
|
||||
# Move active balance to target. Excess balance is withdrawable.
|
||||
decrease_balance(state, pending_consolidation.source_index, source_effective_balance)
|
||||
@@ -1018,7 +1063,9 @@ def process_effective_balance_updates(state: BeaconState) -> None:
|
||||
balance + DOWNWARD_THRESHOLD < validator.effective_balance
|
||||
or validator.effective_balance + UPWARD_THRESHOLD < balance
|
||||
):
|
||||
validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance)
|
||||
validator.effective_balance = min(
|
||||
balance - balance % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance
|
||||
)
|
||||
```
|
||||
|
||||
### Execution engine
|
||||
@@ -1033,7 +1080,8 @@ class NewPayloadRequest(object):
|
||||
execution_payload: ExecutionPayload
|
||||
versioned_hashes: Sequence[VersionedHash]
|
||||
parent_beacon_block_root: Root
|
||||
execution_requests: ExecutionRequests # [New in Electra]
|
||||
# [New in Electra]
|
||||
execution_requests: ExecutionRequests
|
||||
```
|
||||
|
||||
#### Engine APIs
|
||||
@@ -1044,10 +1092,12 @@ class NewPayloadRequest(object):
|
||||
`execution_requests_list`.
|
||||
|
||||
```python
|
||||
def is_valid_block_hash(self: ExecutionEngine,
|
||||
execution_payload: ExecutionPayload,
|
||||
parent_beacon_block_root: Root,
|
||||
execution_requests_list: Sequence[bytes]) -> bool:
|
||||
def is_valid_block_hash(
|
||||
self: ExecutionEngine,
|
||||
execution_payload: ExecutionPayload,
|
||||
parent_beacon_block_root: Root,
|
||||
execution_requests_list: Sequence[bytes],
|
||||
) -> bool:
|
||||
"""
|
||||
Return ``True`` if and only if ``execution_payload.block_hash`` is computed correctly.
|
||||
"""
|
||||
@@ -1060,10 +1110,12 @@ def is_valid_block_hash(self: ExecutionEngine,
|
||||
`execution_requests_list`.
|
||||
|
||||
```python
|
||||
def notify_new_payload(self: ExecutionEngine,
|
||||
execution_payload: ExecutionPayload,
|
||||
parent_beacon_block_root: Root,
|
||||
execution_requests_list: Sequence[bytes]) -> bool:
|
||||
def notify_new_payload(
|
||||
self: ExecutionEngine,
|
||||
execution_payload: ExecutionPayload,
|
||||
parent_beacon_block_root: Root,
|
||||
execution_requests_list: Sequence[bytes],
|
||||
) -> bool:
|
||||
"""
|
||||
Return ``True`` if and only if ``execution_payload`` and ``execution_requests_list``
|
||||
are valid with respect to ``self.execution_state``.
|
||||
@@ -1078,23 +1130,24 @@ additional parameter `execution_requests_list` when calling
|
||||
`is_valid_block_hash` and `notify_new_payload` in Electra.
|
||||
|
||||
```python
|
||||
def verify_and_notify_new_payload(self: ExecutionEngine,
|
||||
new_payload_request: NewPayloadRequest) -> bool:
|
||||
def verify_and_notify_new_payload(
|
||||
self: ExecutionEngine, new_payload_request: NewPayloadRequest
|
||||
) -> bool:
|
||||
"""
|
||||
Return ``True`` if and only if ``new_payload_request`` is valid with respect to ``self.execution_state``.
|
||||
"""
|
||||
execution_payload = new_payload_request.execution_payload
|
||||
parent_beacon_block_root = new_payload_request.parent_beacon_block_root
|
||||
execution_requests_list = get_execution_requests_list(new_payload_request.execution_requests) # [New in Electra]
|
||||
# [New in Electra]
|
||||
execution_requests_list = get_execution_requests_list(new_payload_request.execution_requests)
|
||||
|
||||
if b'' in execution_payload.transactions:
|
||||
if b"" in execution_payload.transactions:
|
||||
return False
|
||||
|
||||
# [Modified in Electra]
|
||||
if not self.is_valid_block_hash(
|
||||
execution_payload,
|
||||
parent_beacon_block_root,
|
||||
execution_requests_list):
|
||||
execution_payload, parent_beacon_block_root, execution_requests_list
|
||||
):
|
||||
return False
|
||||
|
||||
if not self.is_valid_versioned_hashes(new_payload_request):
|
||||
@@ -1102,9 +1155,8 @@ def verify_and_notify_new_payload(self: ExecutionEngine,
|
||||
|
||||
# [Modified in Electra]
|
||||
if not self.notify_new_payload(
|
||||
execution_payload,
|
||||
parent_beacon_block_root,
|
||||
execution_requests_list):
|
||||
execution_payload, parent_beacon_block_root, execution_requests_list
|
||||
):
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -1115,11 +1167,14 @@ def verify_and_notify_new_payload(self: ExecutionEngine,
|
||||
```python
|
||||
def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_block_header(state, block)
|
||||
process_withdrawals(state, block.body.execution_payload) # [Modified in Electra:EIP7251]
|
||||
process_execution_payload(state, block.body, EXECUTION_ENGINE) # [Modified in Electra:EIP6110]
|
||||
# [Modified in Electra:EIP7251]
|
||||
process_withdrawals(state, block.body.execution_payload)
|
||||
# [Modified in Electra:EIP6110]
|
||||
process_execution_payload(state, block.body, EXECUTION_ENGINE)
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
process_operations(state, block.body) # [Modified in Electra:EIP6110:EIP7002:EIP7549:EIP7251]
|
||||
# [Modified in Electra:EIP6110:EIP7002:EIP7549:EIP7251]
|
||||
process_operations(state, block.body)
|
||||
process_sync_aggregate(state, block.body.sync_aggregate)
|
||||
```
|
||||
|
||||
@@ -1139,24 +1194,33 @@ def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal],
|
||||
|
||||
# [New in Electra:EIP7251] Consume pending partial withdrawals
|
||||
for withdrawal in state.pending_partial_withdrawals:
|
||||
if withdrawal.withdrawable_epoch > epoch or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP:
|
||||
if (
|
||||
withdrawal.withdrawable_epoch > epoch
|
||||
or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP
|
||||
):
|
||||
break
|
||||
|
||||
validator = state.validators[withdrawal.validator_index]
|
||||
has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
|
||||
total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index)
|
||||
total_withdrawn = sum(
|
||||
w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index
|
||||
)
|
||||
balance = state.balances[withdrawal.validator_index] - total_withdrawn
|
||||
has_excess_balance = balance > MIN_ACTIVATION_BALANCE
|
||||
if validator.exit_epoch == FAR_FUTURE_EPOCH and has_sufficient_effective_balance and has_excess_balance:
|
||||
withdrawable_balance = min(
|
||||
balance - MIN_ACTIVATION_BALANCE,
|
||||
withdrawal.amount)
|
||||
withdrawals.append(Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=withdrawal.validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=withdrawable_balance,
|
||||
))
|
||||
if (
|
||||
validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
and has_sufficient_effective_balance
|
||||
and has_excess_balance
|
||||
):
|
||||
withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount)
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=withdrawal.validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=withdrawable_balance,
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
|
||||
processed_partial_withdrawals_count += 1
|
||||
@@ -1169,20 +1233,25 @@ def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal],
|
||||
total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == validator_index)
|
||||
balance = state.balances[validator_index] - total_withdrawn
|
||||
if is_fully_withdrawable_validator(validator, balance, epoch):
|
||||
withdrawals.append(Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance,
|
||||
))
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance,
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
elif is_partially_withdrawable_validator(validator, balance):
|
||||
withdrawals.append(Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance - get_max_effective_balance(validator), # [Modified in Electra:EIP7251]
|
||||
))
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
# [Modified in Electra:EIP7251]
|
||||
amount=balance - get_max_effective_balance(validator),
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
break
|
||||
@@ -1204,8 +1273,10 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
|
||||
for withdrawal in expected_withdrawals:
|
||||
decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
|
||||
|
||||
# Update pending partial withdrawals [New in Electra:EIP7251]
|
||||
state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
|
||||
# [New in Electra:EIP7251] Update pending partial withdrawals
|
||||
state.pending_partial_withdrawals = state.pending_partial_withdrawals[
|
||||
processed_partial_withdrawals_count:
|
||||
]
|
||||
|
||||
# Update the next withdrawal index if this block contained withdrawals
|
||||
if len(expected_withdrawals) != 0:
|
||||
@@ -1215,7 +1286,9 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
|
||||
# Update the next validator index to start the next withdrawal sweep
|
||||
if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
# Next sweep starts after the latest withdrawal's validator index
|
||||
next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
|
||||
next_validator_index = ValidatorIndex(
|
||||
(expected_withdrawals[-1].validator_index + 1) % len(state.validators)
|
||||
)
|
||||
state.next_withdrawal_validator_index = next_validator_index
|
||||
else:
|
||||
# Advance sweep by the max length of the sweep if there was not a full set of withdrawals
|
||||
@@ -1253,7 +1326,9 @@ def get_execution_requests_list(execution_requests: ExecutionRequests) -> Sequen
|
||||
the updated `NewPayloadRequest`).
|
||||
|
||||
```python
|
||||
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(
    state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
) -> None:
    payload = body.execution_payload

    # Verify consistency of the parent hash with respect to the previous execution payload header
@@ -1262,16 +1337,19 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
    assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
    # Verify commitments are under limit
    assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA  # [Modified in Electra:EIP7691]
    # [Modified in Electra:EIP7691] Verify commitments are under limit
    assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA
    # Verify the execution payload is valid
    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
    versioned_hashes = [
        kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments
    ]
    assert execution_engine.verify_and_notify_new_payload(
        NewPayloadRequest(
            execution_payload=payload,
            versioned_hashes=versioned_hashes,
            parent_beacon_block_root=state.latest_block_header.parent_root,
            execution_requests=body.execution_requests,  # [New in Electra]
            # [New in Electra]
            execution_requests=body.execution_requests,
        )
    )
    # Cache execution payload header

@@ -1307,9 +1385,13 @@ functionality in Electra.

def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # [Modified in Electra:EIP6110]
    # Disable former deposit mechanism once all prior deposits are processed
    eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
    eth1_deposit_index_limit = min(
        state.eth1_data.deposit_count, state.deposit_requests_start_index
    )
    if state.eth1_deposit_index < eth1_deposit_index_limit:
        assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
        assert len(body.deposits) == min(
            MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index
        )
    else:
        assert len(body.deposits) == 0

@@ -1319,13 +1401,18 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:

    for_ops(body.proposer_slashings, process_proposer_slashing)
    for_ops(body.attester_slashings, process_attester_slashing)
    for_ops(body.attestations, process_attestation)  # [Modified in Electra:EIP7549]
    # [Modified in Electra:EIP7549]
    for_ops(body.attestations, process_attestation)
    for_ops(body.deposits, process_deposit)
    for_ops(body.voluntary_exits, process_voluntary_exit)  # [Modified in Electra:EIP7251]
    # [Modified in Electra:EIP7251]
    for_ops(body.voluntary_exits, process_voluntary_exit)
    for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
    for_ops(body.execution_requests.deposits, process_deposit_request)  # [New in Electra:EIP6110]
    for_ops(body.execution_requests.withdrawals, process_withdrawal_request)  # [New in Electra:EIP7002:EIP7251]
    for_ops(body.execution_requests.consolidations, process_consolidation_request)  # [New in Electra:EIP7251]
    # [New in Electra:EIP6110]
    for_ops(body.execution_requests.deposits, process_deposit_request)
    # [New in Electra:EIP7002:EIP7251]
    for_ops(body.execution_requests.withdrawals, process_withdrawal_request)
    # [New in Electra:EIP7251]
    for_ops(body.execution_requests.consolidations, process_consolidation_request)
```
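
As a sanity check on the hunk above, here is a minimal, self-contained sketch (illustrative numbers only, not spec presets) of how `eth1_deposit_index_limit` winds down the legacy Eth1 deposit mechanism once EIP-6110 deposit requests take over:

```python
# Illustrative stand-ins for the state fields referenced in process_operations.
MAX_DEPOSITS = 16
eth1_data_deposit_count = 1000       # deposits known to the legacy Eth1 bridge
deposit_requests_start_index = 950   # index of the first EIP-6110 deposit request
eth1_deposit_index = 940             # legacy deposits processed so far

eth1_deposit_index_limit = min(eth1_data_deposit_count, deposit_requests_start_index)
if eth1_deposit_index < eth1_deposit_index_limit:
    # 10 legacy deposits remain, so a block must include min(16, 10) = 10 of them.
    expected_deposits = min(MAX_DEPOSITS, eth1_deposit_index_limit - eth1_deposit_index)
else:
    # Once the legacy queue is drained, blocks must carry no legacy deposits.
    expected_deposits = 0
assert expected_deposits == 10
```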

##### Attestations

@@ -1349,7 +1436,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
        assert committee_index < get_committee_count_per_slot(state, data.target.epoch)
        committee = get_beacon_committee(state, data.slot, committee_index)
        committee_attesters = set(
            attester_index for i, attester_index in enumerate(committee)
            attester_index
            for i, attester_index in enumerate(committee)
            if attestation.aggregation_bits[committee_offset + i]
        )
        assert len(committee_attesters) > 0

@@ -1359,7 +1447,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    assert len(attestation.aggregation_bits) == committee_offset

    # Participation flag indices
    participation_flag_indices = get_attestation_participation_flag_indices(state, data, state.slot - data.slot)
    participation_flag_indices = get_attestation_participation_flag_indices(
        state, data, state.slot - data.slot
    )

    # Verify signature
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))

@@ -1373,12 +1463,16 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    proposer_reward_numerator = 0
    for index in get_attesting_indices(state, attestation):
        for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
            if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
            if flag_index in participation_flag_indices and not has_flag(
                epoch_participation[index], flag_index
            ):
                epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
                proposer_reward_numerator += get_base_reward(state, index) * weight

    # Reward proposer
    proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
    proposer_reward_denominator = (
        (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
    )
    proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
    increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```
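
To make the EIP-7549 bit layout above concrete, a small self-contained sketch (toy committees, not spec types): a single `aggregation_bits` list covers every committee named in `committee_bits`, each committee reading its own slice via `committee_offset`:

```python
# Toy committees: committee index -> validator indices (illustrative only).
committees = {0: [101, 102, 103], 1: [201, 202]}
aggregation_bits = [1, 0, 1, 0, 1]  # committee 0 occupies bits 0..2, committee 1 bits 3..4

attesters = []
committee_offset = 0
for committee_index in sorted(committees):
    committee = committees[committee_index]
    for i, validator_index in enumerate(committee):
        if aggregation_bits[committee_offset + i]:
            attesters.append(validator_index)
    committee_offset += len(committee)

assert committee_offset == len(aggregation_bits)  # mirrors the length check above
assert attesters == [101, 103, 202]
```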

@@ -1391,7 +1485,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
compounding withdrawal credential.

```python
def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
def get_validator_from_deposit(
    pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
) -> Validator:
    validator = Validator(
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,

@@ -1405,7 +1501,9 @@ def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes3

    # [Modified in Electra:EIP7251]
    max_effective_balance = get_max_effective_balance(validator)
    validator.effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance)
    validator.effective_balance = min(
        amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance
    )

    return validator
```
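
A rough sketch of what the `[Modified in Electra:EIP7251]` lines change, using the spec's Gwei presets as plain ints (values assumed here; check the configuration tables): only validators whose credentials allow compounding get the higher cap.

```python
# Gwei values as plain ints (assumed to match the Electra presets).
EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000          # 1 ETH
MIN_ACTIVATION_BALANCE = 32_000_000_000              # 32 ETH cap for 0x01 credentials
MAX_EFFECTIVE_BALANCE_ELECTRA = 2_048_000_000_000    # 2048 ETH cap for 0x02 credentials

def effective_balance_for(amount: int, compounding: bool) -> int:
    # Mirrors min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance).
    cap = MAX_EFFECTIVE_BALANCE_ELECTRA if compounding else MIN_ACTIVATION_BALANCE
    return min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, cap)

deposit = 40_500_000_000  # 40.5 ETH
assert effective_balance_for(deposit, compounding=False) == 32_000_000_000
assert effective_balance_for(deposit, compounding=True) == 40_000_000_000
```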

@@ -1416,12 +1514,12 @@ def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes3
`get_validator_from_deposit`.

```python
def add_validator_to_registry(state: BeaconState,
                              pubkey: BLSPubkey,
                              withdrawal_credentials: Bytes32,
                              amount: uint64) -> None:
def add_validator_to_registry(
    state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
) -> None:
    index = get_index_for_new_validator(state)
    validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)  # [Modified in Electra:EIP7251]
    # [Modified in Electra:EIP7251]
    validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
    set_or_append_list(state.validators, index, validator)
    set_or_append_list(state.balances, index, amount)
    set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))

@@ -1434,43 +1532,48 @@ def add_validator_to_registry(state: BeaconState,

*Note*: The function `apply_deposit` is modified to support EIP7251.

```python
def apply_deposit(state: BeaconState,
                  pubkey: BLSPubkey,
                  withdrawal_credentials: Bytes32,
                  amount: uint64,
                  signature: BLSSignature) -> None:
def apply_deposit(
    state: BeaconState,
    pubkey: BLSPubkey,
    withdrawal_credentials: Bytes32,
    amount: uint64,
    signature: BLSSignature,
) -> None:
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
            add_validator_to_registry(state, pubkey, withdrawal_credentials, Gwei(0))  # [Modified in Electra:EIP7251]
            # [Modified in Electra:EIP7251]
            add_validator_to_registry(state, pubkey, withdrawal_credentials, Gwei(0))
        else:
            return

    # Increase balance by deposit amount
    # [Modified in Electra:EIP7251]
    state.pending_deposits.append(PendingDeposit(
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,
        amount=amount,
        signature=signature,
        slot=GENESIS_SLOT  # Use GENESIS_SLOT to distinguish from a pending deposit request
    ))
    # Increase balance by deposit amount
    state.pending_deposits.append(
        PendingDeposit(
            pubkey=pubkey,
            withdrawal_credentials=withdrawal_credentials,
            amount=amount,
            signature=signature,
            slot=GENESIS_SLOT,  # Use GENESIS_SLOT to distinguish from a pending deposit request
        )
    )
```
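
The `slot=GENESIS_SLOT` line is the discriminator worth noting here: deposits arriving via the legacy Eth1 bridge are queued with `GENESIS_SLOT`, while `process_deposit_request` (below) queues EIP-6110 requests with their actual inclusion slot. A minimal sketch of that convention (`GENESIS_SLOT` assumed to be 0, as in the spec):

```python
GENESIS_SLOT = 0  # assumed spec value

def came_from_legacy_bridge(pending_deposit_slot: int) -> bool:
    # apply_deposit tags bridge deposits with GENESIS_SLOT;
    # deposit requests record the slot they were included in.
    return pending_deposit_slot == GENESIS_SLOT

assert came_from_legacy_bridge(GENESIS_SLOT)
assert not came_from_legacy_bridge(1234)
```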

###### New `is_valid_deposit_signature`

```python
def is_valid_deposit_signature(pubkey: BLSPubkey,
                               withdrawal_credentials: Bytes32,
                               amount: uint64,
                               signature: BLSSignature) -> bool:
def is_valid_deposit_signature(
    pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature
) -> bool:
    deposit_message = DepositMessage(
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,
        amount=amount,
    )
    domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
    # Fork-agnostic domain since deposits are valid across forks
    domain = compute_domain(DOMAIN_DEPOSIT)
    signing_root = compute_signing_root(deposit_message, domain)
    return bls.Verify(pubkey, signing_root, signature)
```

@@ -1486,7 +1589,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(deposit.data),
        branch=deposit.proof,
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the List length mix-in
        # Add 1 for the List length mix-in
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,
        index=state.eth1_deposit_index,
        root=state.eth1_data.deposit_root,
    )

@@ -1523,10 +1627,14 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu
    assert get_current_epoch(state) >= voluntary_exit.epoch
    # Verify the validator has been active long enough
    assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
    # [New in Electra:EIP7251]
    # Only exit validator if it has no pending withdrawals in the queue
    assert get_pending_balance_to_withdraw(state, voluntary_exit.validator_index) == 0  # [New in Electra:EIP7251]

    assert get_pending_balance_to_withdraw(state, voluntary_exit.validator_index) == 0
    # Verify signature
    domain = compute_domain(DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root)
    domain = compute_domain(
        DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root
    )
    signing_root = compute_signing_root(voluntary_exit, domain)
    assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
    # Initiate exit

@@ -1538,15 +1646,15 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu

###### New `process_withdrawal_request`

```python
def process_withdrawal_request(
    state: BeaconState,
    withdrawal_request: WithdrawalRequest
) -> None:
def process_withdrawal_request(state: BeaconState, withdrawal_request: WithdrawalRequest) -> None:
    amount = withdrawal_request.amount
    is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT

    # If partial withdrawal queue is full, only full exits are processed
    if len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT and not is_full_exit_request:
    if (
        len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT
        and not is_full_exit_request
    ):
        return

    validator_pubkeys = [v.pubkey for v in state.validators]

@@ -1583,21 +1691,28 @@ def process_withdrawal_request(
        return

    has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
    has_excess_balance = state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw
    has_excess_balance = (
        state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw
    )

    # Only allow partial withdrawals with compounding withdrawal credentials
    if has_compounding_withdrawal_credential(validator) and has_sufficient_effective_balance and has_excess_balance:
    if (
        has_compounding_withdrawal_credential(validator)
        and has_sufficient_effective_balance
        and has_excess_balance
    ):
        to_withdraw = min(
            state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
            amount
            state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw, amount
        )
        exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw)
        withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
        state.pending_partial_withdrawals.append(PendingPartialWithdrawal(
            validator_index=index,
            amount=to_withdraw,
            withdrawable_epoch=withdrawable_epoch,
        ))
        state.pending_partial_withdrawals.append(
            PendingPartialWithdrawal(
                validator_index=index,
                amount=to_withdraw,
                withdrawable_epoch=withdrawable_epoch,
            )
        )
```
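
A worked example of the `to_withdraw` clamp above (illustrative Gwei amounts): the request can never dip the validator below `MIN_ACTIVATION_BALANCE`, counting withdrawals that are already queued.

```python
# Illustrative Gwei amounts.
MIN_ACTIVATION_BALANCE = 32_000_000_000
balance = 40_000_000_000                      # validator's current balance
pending_balance_to_withdraw = 3_000_000_000   # partial withdrawals already queued
amount = 10_000_000_000                       # amount named in the request

# Only the excess over 32 ETH plus what is already queued may leave,
# even though the request asked for 10 ETH.
to_withdraw = min(balance - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw, amount)
assert to_withdraw == 5_000_000_000
```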

##### Deposit requests

@@ -1611,13 +1726,15 @@ def process_deposit_request(state: BeaconState, deposit_request: DepositRequest)
        state.deposit_requests_start_index = deposit_request.index

    # Create pending deposit
    state.pending_deposits.append(PendingDeposit(
        pubkey=deposit_request.pubkey,
        withdrawal_credentials=deposit_request.withdrawal_credentials,
        amount=deposit_request.amount,
        signature=deposit_request.signature,
        slot=state.slot,
    ))
    state.pending_deposits.append(
        PendingDeposit(
            pubkey=deposit_request.pubkey,
            withdrawal_credentials=deposit_request.withdrawal_credentials,
            amount=deposit_request.amount,
            signature=deposit_request.signature,
            slot=state.slot,
        )
    )
```

##### Execution layer consolidation requests

@@ -1626,8 +1743,7 @@ def process_deposit_request(state: BeaconState, deposit_request: DepositRequest)

```python
def is_valid_switch_to_compounding_request(
    state: BeaconState,
    consolidation_request: ConsolidationRequest
    state: BeaconState, consolidation_request: ConsolidationRequest
) -> bool:
    # Switch to compounding requires source and target be equal
    if consolidation_request.source_pubkey != consolidation_request.target_pubkey:

@@ -1665,8 +1781,7 @@ def is_valid_switch_to_compounding_request(

```python
def process_consolidation_request(
    state: BeaconState,
    consolidation_request: ConsolidationRequest
    state: BeaconState, consolidation_request: ConsolidationRequest
) -> None:
    if is_valid_switch_to_compounding_request(state, consolidation_request):
        validator_pubkeys = [v.pubkey for v in state.validators]

@@ -1735,8 +1850,7 @@ def process_consolidation_request(
    source_validator.withdrawable_epoch = Epoch(
        source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    )
    state.pending_consolidations.append(PendingConsolidation(
        source_index=source_index,
        target_index=target_index
    ))
    state.pending_consolidations.append(
        PendingConsolidation(source_index=source_index, target_index=target_index)
    )
```

@@ -77,61 +77,56 @@ def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:
        earliest_exit_epoch += Epoch(1)

    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=ELECTRA_FORK_VERSION,  # [Modified in Electra:EIP6110]
            # [Modified in Electra]
            current_version=ELECTRA_FORK_VERSION,
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=pre.latest_execution_payload_header,
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
        # [New in Electra:EIP6110]
        deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX,
        # [New in Electra:EIP7251]
        deposit_balance_to_consume=0,
        # [New in Electra:EIP7251]
        exit_balance_to_consume=0,
        # [New in Electra:EIP7251]
        earliest_exit_epoch=earliest_exit_epoch,
        # [New in Electra:EIP7251]
        consolidation_balance_to_consume=0,
        # [New in Electra:EIP7251]
        earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)),
        # [New in Electra:EIP7251]
        pending_deposits=[],
        # [New in Electra:EIP7251]
        pending_partial_withdrawals=[],
        # [New in Electra:EIP7251]
        pending_consolidations=[],
    )

@@ -140,13 +135,14 @@ def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:

    # [New in Electra:EIP7251]
    # add validators that are not yet active to pending balance deposits
    pre_activation = sorted([
        index for index, validator in enumerate(post.validators)
        if validator.activation_epoch == FAR_FUTURE_EPOCH
    ], key=lambda index: (
        post.validators[index].activation_eligibility_epoch,
        index
    ))
    pre_activation = sorted(
        [
            index
            for index, validator in enumerate(post.validators)
            if validator.activation_epoch == FAR_FUTURE_EPOCH
        ],
        key=lambda index: (post.validators[index].activation_eligibility_epoch, index),
    )

    for index in pre_activation:
        balance = post.balances[index]

@@ -156,13 +152,15 @@ def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:
        validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
        # Use bls.G2_POINT_AT_INFINITY as a signature field placeholder
        # and GENESIS_SLOT to distinguish from a pending deposit request
        post.pending_deposits.append(PendingDeposit(
            pubkey=validator.pubkey,
            withdrawal_credentials=validator.withdrawal_credentials,
            amount=balance,
            signature=bls.G2_POINT_AT_INFINITY,
            slot=GENESIS_SLOT,
        ))
        post.pending_deposits.append(
            PendingDeposit(
                pubkey=validator.pubkey,
                withdrawal_credentials=validator.withdrawal_credentials,
                amount=balance,
                signature=bls.G2_POINT_AT_INFINITY,
                slot=GENESIS_SLOT,
            )
        )

    # Ensure early adopters of compounding credentials go through the activation churn
    for index, validator in enumerate(post.validators):

@@ -23,8 +23,9 @@ protocols uses the original format.

### `normalize_merkle_branch`

```python
def normalize_merkle_branch(branch: Sequence[Bytes32],
                            gindex: GeneralizedIndex) -> Sequence[Bytes32]:
def normalize_merkle_branch(
    branch: Sequence[Bytes32], gindex: GeneralizedIndex
) -> Sequence[Bytes32]:
    depth = floorlog2(gindex)
    num_extra = depth - len(branch)
    return [Bytes32()] * num_extra + [*branch]

@@ -51,7 +52,8 @@ def upgrade_lc_bootstrap_to_electra(pre: deneb.LightClientBootstrap) -> LightCli
        header=upgrade_lc_header_to_electra(pre.header),
        current_sync_committee=pre.current_sync_committee,
        current_sync_committee_branch=normalize_merkle_branch(
            pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA),
            pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA
        ),
    )
```
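
To see what `normalize_merkle_branch` does for these upgrades, a small self-contained sketch (toy depths, not the real gindices): pre-Electra branches are shorter than the Electra gindex depth, so zero roots are prepended.

```python
# Minimal stand-in for normalize_merkle_branch, over plain bytes.
def normalize(branch: list, depth: int) -> list:
    num_extra = depth - len(branch)
    return [b"\x00" * 32] * num_extra + list(branch)

deneb_branch = [b"\x11" * 32] * 5            # toy 5-deep branch
electra_branch = normalize(deneb_branch, 6)  # toy Electra-depth target
assert len(electra_branch) == 6
assert electra_branch[0] == b"\x00" * 32     # padding lands at the front
```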

@@ -61,29 +63,32 @@ def upgrade_lc_update_to_electra(pre: deneb.LightClientUpdate) -> LightClientUpd
        attested_header=upgrade_lc_header_to_electra(pre.attested_header),
        next_sync_committee=pre.next_sync_committee,
        next_sync_committee_branch=normalize_merkle_branch(
            pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA),
            pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA
        ),
        finalized_header=upgrade_lc_header_to_electra(pre.finalized_header),
        finality_branch=normalize_merkle_branch(
            pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
        finality_branch=normalize_merkle_branch(pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
        sync_aggregate=pre.sync_aggregate,
        signature_slot=pre.signature_slot,
    )
```

```python
def upgrade_lc_finality_update_to_electra(pre: deneb.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
def upgrade_lc_finality_update_to_electra(
    pre: deneb.LightClientFinalityUpdate,
) -> LightClientFinalityUpdate:
    return LightClientFinalityUpdate(
        attested_header=upgrade_lc_header_to_electra(pre.attested_header),
        finalized_header=upgrade_lc_header_to_electra(pre.finalized_header),
        finality_branch=normalize_merkle_branch(
            pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
        finality_branch=normalize_merkle_branch(pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
        sync_aggregate=pre.sync_aggregate,
        signature_slot=pre.signature_slot,
    )
```

```python
def upgrade_lc_optimistic_update_to_electra(pre: deneb.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
def upgrade_lc_optimistic_update_to_electra(
    pre: deneb.LightClientOptimisticUpdate,
) -> LightClientOptimisticUpdate:
    return LightClientOptimisticUpdate(
        attested_header=upgrade_lc_header_to_electra(pre.attested_header),
        sync_aggregate=pre.sync_aggregate,

@@ -122,7 +122,9 @@ aggregate from a list of network aggregates with equal `AttestationData`:

```python
def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation:
    aggregates = sorted(network_aggregates, key=lambda a: get_committee_indices(a.committee_bits)[0])
    aggregates = sorted(
        network_aggregates, key=lambda a: get_committee_indices(a.committee_bits)[0]
    )

    data = aggregates[0].data
    aggregation_bits = Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT]()

@@ -152,7 +154,9 @@ result of the following function:

```python
def get_eth1_pending_deposit_count(state: BeaconState) -> uint64:
    eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
    eth1_deposit_index_limit = min(
        state.eth1_data.deposit_count, state.deposit_requests_start_index
    )
    if state.eth1_deposit_index < eth1_deposit_index_limit:
        return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
    else:

@@ -173,7 +177,8 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
    period_start = voting_period_start_time(state)
    # `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height
    votes_to_consider = [
        get_eth1_data(block) for block in eth1_chain
        get_eth1_data(block)
        for block in eth1_chain
        if (
            is_candidate_block(block, period_start)
            # Ensure cannot move back to earlier deposit contract states

@@ -187,12 +192,18 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
    # Default vote on latest eth1 block data in the period range unless eth1 chain is not live
    # Non-substantive casting for linter
    state_eth1_data: Eth1Data = state.eth1_data
    default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data
    default_vote = (
        votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data
    )

    return max(
        valid_votes,
        key=lambda v: (valid_votes.count(v), -valid_votes.index(v)),  # Tiebreak by smallest distance
        default=default_vote
        # Tiebreak by smallest distance
        key=lambda v: (
            valid_votes.count(v),
            -valid_votes.index(v),
        ),
        default=default_vote,
    )
```
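
The reformatted `max(...)` key above packs the whole vote-selection rule into one tuple: a higher occurrence count wins, and ties go to the vote seen earliest. A self-contained sketch:

```python
# Illustrative only: strings stand in for Eth1Data votes.
valid_votes = ["A", "B", "A", "C", "B"]
winner = max(valid_votes, key=lambda v: (valid_votes.count(v), -valid_votes.index(v)))
# "A" and "B" both occur twice; "A" first appears at index 0, "B" at index 1,
# and -0 > -1, so the earlier vote "A" wins the tiebreak.
assert winner == "A"
```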

@@ -209,11 +220,13 @@ processed through any empty slots up to the assigned slot using
`get_expected_withdrawals`.

```python
def prepare_execution_payload(state: BeaconState,
                              safe_block_hash: Hash32,
                              finalized_block_hash: Hash32,
                              suggested_fee_recipient: ExecutionAddress,
                              execution_engine: ExecutionEngine) -> Optional[PayloadId]:
def prepare_execution_payload(
    state: BeaconState,
    safe_block_hash: Hash32,
    finalized_block_hash: Hash32,
    suggested_fee_recipient: ExecutionAddress,
    execution_engine: ExecutionEngine,
) -> Optional[PayloadId]:
    # Verify consistency of the parent hash with respect to the previous execution payload header
    parent_hash = state.latest_execution_payload_header.block_hash

@@ -278,18 +291,15 @@ def get_execution_requests(execution_requests_list: Sequence[bytes]) -> Executio

    if request_type == DEPOSIT_REQUEST_TYPE:
        deposits = ssz_deserialize(
            List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD],
            request_data
            List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD], request_data
        )
    elif request_type == WITHDRAWAL_REQUEST_TYPE:
        withdrawals = ssz_deserialize(
            List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD],
            request_data
            List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD], request_data
        )
    elif request_type == CONSOLIDATION_REQUEST_TYPE:
        consolidations = ssz_deserialize(
            List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD],
            request_data
            List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD], request_data
        )

    return ExecutionRequests(

@@ -58,7 +58,9 @@ A brief reference for what these values look like in practice

#### Modified `is_within_weak_subjectivity_period`

```python
def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
def is_within_weak_subjectivity_period(
    store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint
) -> bool:
    # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
    assert ws_state.latest_block_header.state_root == ws_checkpoint.root
    assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch

@@ -41,7 +41,9 @@ and is under active development.

##### Modified `process_execution_payload`

```python
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
def process_execution_payload(
    state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
) -> None:
    payload = body.execution_payload

    # Verify consistency of the parent hash with respect to the previous execution payload header

@@ -50,10 +52,12 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
    assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
    # Verify commitments are under limit
    assert len(body.blob_kzg_commitments) <= get_max_blobs_per_block(get_current_epoch(state))  # [Modified in Fulu:EIP7892]
    # [Modified in Fulu:EIP7892] Verify commitments are under limit
    assert len(body.blob_kzg_commitments) <= get_max_blobs_per_block(get_current_epoch(state))
    # Verify the execution payload is valid
    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
    versioned_hashes = [
        kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments
    ]
    assert execution_engine.verify_and_notify_new_payload(
        NewPayloadRequest(
            execution_payload=payload,

@@ -150,7 +154,9 @@ class BeaconState(Container):

#### New `compute_proposer_indices`

```python
def compute_proposer_indices(state: BeaconState, epoch: Epoch, seed: Bytes32, indices: Sequence[ValidatorIndex]) -> List[ValidatorIndex, SLOTS_PER_EPOCH]:
def compute_proposer_indices(
    state: BeaconState, epoch: Epoch, seed: Bytes32, indices: Sequence[ValidatorIndex]
) -> List[ValidatorIndex, SLOTS_PER_EPOCH]:
    """
    Return the proposer indices for the given ``epoch``.
    """

@@ -177,7 +183,9 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:

#### New `get_beacon_proposer_indices`

```python
def get_beacon_proposer_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex, SLOTS_PER_EPOCH]:
def get_beacon_proposer_indices(
    state: BeaconState, epoch: Epoch
) -> List[ValidatorIndex, SLOTS_PER_EPOCH]:
    """
    Return the proposer indices for the given ``epoch``.
    """

@@ -227,6 +235,8 @@ def process_proposer_lookahead(state: BeaconState) -> None:
    # Shift out proposers in the first epoch
    state.proposer_lookahead[:last_epoch_start] = state.proposer_lookahead[SLOTS_PER_EPOCH:]
    # Fill in the last epoch with new proposer indices
    last_epoch_proposers = get_beacon_proposer_indices(state, Epoch(get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1))
    last_epoch_proposers = get_beacon_proposer_indices(
        state, Epoch(get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1)
    )
    state.proposer_lookahead[last_epoch_start:] = last_epoch_proposers
```
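
The lookahead shift above is easiest to see as a sliding window. A toy sketch (tiny `SLOTS_PER_EPOCH` and made-up indices, not the spec function itself):

```python
SLOTS_PER_EPOCH = 4  # toy value; the spec uses a larger preset
lookahead = [10, 11, 12, 13, 20, 21, 22, 23]  # proposer indices for two epochs
new_epoch_proposers = [30, 31, 32, 33]        # freshly computed for the last epoch

# Shift out the epoch that just completed, then fill in the newly visible epoch.
lookahead = lookahead[SLOTS_PER_EPOCH:] + new_epoch_proposers
assert lookahead == [20, 21, 22, 23, 30, 31, 32, 33]
```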

@@ -118,8 +118,7 @@ def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence
    custody_groups: List[CustodyIndex] = []
    while len(custody_groups) < custody_group_count:
        custody_group = CustodyIndex(
            bytes_to_uint64(hash(uint_to_bytes(current_id))[0:8])
            % NUMBER_OF_CUSTODY_GROUPS
            bytes_to_uint64(hash(uint_to_bytes(current_id))[0:8]) % NUMBER_OF_CUSTODY_GROUPS
        )
        if custody_group not in custody_groups:
            custody_groups.append(custody_group)

@@ -153,8 +152,7 @@ def compute_columns_for_custody_group(custody_group: CustodyIndex) -> Sequence[C
    assert custody_group < NUMBER_OF_CUSTODY_GROUPS
    columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS
    return [
        ColumnIndex(NUMBER_OF_CUSTODY_GROUPS * i + custody_group)
        for i in range(columns_per_group)
        ColumnIndex(NUMBER_OF_CUSTODY_GROUPS * i + custody_group) for i in range(columns_per_group)
    ]
```
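
A worked example of the column assignment above, with toy sizes (far smaller than the spec presets): group `g` custodies exactly the columns congruent to `g` modulo the number of groups.

```python
# Toy sizes for illustration only.
NUMBER_OF_COLUMNS = 8
NUMBER_OF_CUSTODY_GROUPS = 4
custody_group = 1

columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS
columns = [NUMBER_OF_CUSTODY_GROUPS * i + custody_group for i in range(columns_per_group)]
assert columns == [1, 5]  # the columns equal to 1 mod 4
```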

@@ -172,19 +170,23 @@ def compute_matrix(blobs: Sequence[Blob]) -> Sequence[MatrixEntry]:
    for blob_index, blob in enumerate(blobs):
        cells, proofs = compute_cells_and_kzg_proofs(blob)
        for cell_index, (cell, proof) in enumerate(zip(cells, proofs)):
            matrix.append(MatrixEntry(
                cell=cell,
                kzg_proof=proof,
                row_index=blob_index,
                column_index=cell_index,
            ))
            matrix.append(
                MatrixEntry(
                    cell=cell,
                    kzg_proof=proof,
                    row_index=blob_index,
                    column_index=cell_index,
                )
            )
    return matrix
```

### `recover_matrix`

```python
def recover_matrix(partial_matrix: Sequence[MatrixEntry], blob_count: uint64) -> Sequence[MatrixEntry]:
def recover_matrix(
    partial_matrix: Sequence[MatrixEntry], blob_count: uint64
) -> Sequence[MatrixEntry]:
    """
    Recover the full, flattened sequence of matrix entries.

@@ -197,12 +199,14 @@ def recover_matrix(partial_matrix: Sequence[MatrixEntry], blob_count: uint64) ->
        cells = [e.cell for e in partial_matrix if e.row_index == blob_index]
        recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells)
        for cell_index, (cell, proof) in enumerate(zip(recovered_cells, recovered_proofs)):
            matrix.append(MatrixEntry(
                cell=cell,
                kzg_proof=proof,
                row_index=blob_index,
                column_index=cell_index,
            ))
            matrix.append(
                MatrixEntry(
                    cell=cell,
                    kzg_proof=proof,
                    row_index=blob_index,
                    column_index=cell_index,
                )
            )
    return matrix
```

@@ -59,7 +59,7 @@ def compute_fork_version(epoch: Epoch) -> Version:

```python
def initialize_proposer_lookahead(
    state: electra.BeaconState
    state: electra.BeaconState,
) -> List[ValidatorIndex, (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH]:
    """
    Return the proposer indices for the full available lookahead starting from current epoch.

@@ -91,54 +91,40 @@ change is made to upgrade to Fulu.
def upgrade_to_fulu(pre: electra.BeaconState) -> BeaconState:
    epoch = electra.get_current_epoch(pre)
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=FULU_FORK_VERSION,  # [Modified in Fulu]
            # [Modified in Fulu]
            current_version=FULU_FORK_VERSION,
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=pre.latest_execution_payload_header,
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
        # On-chain deposits
        deposit_requests_start_index=pre.deposit_requests_start_index,
        # Consolidations
        deposit_balance_to_consume=pre.deposit_balance_to_consume,
        exit_balance_to_consume=pre.exit_balance_to_consume,
        earliest_exit_epoch=pre.earliest_exit_epoch,

@@ -147,7 +133,8 @@ def upgrade_to_fulu(pre: electra.BeaconState) -> BeaconState:
        pending_deposits=pre.pending_deposits,
        pending_partial_withdrawals=pre.pending_partial_withdrawals,
        pending_consolidations=pre.pending_consolidations,
        proposer_lookahead=initialize_proposer_lookahead(pre),  # [New in Fulu:EIP7917]
        # [New in Fulu:EIP7917]
        proposer_lookahead=initialize_proposer_lookahead(pre),
    )

    return post

@@ -90,7 +90,9 @@ def verify_data_column_sidecar(sidecar: DataColumnSidecar) -> bool:
        return False

    # The column length must be equal to the number of commitments/proofs
    if len(sidecar.column) != len(sidecar.kzg_commitments) or len(sidecar.column) != len(sidecar.kzg_proofs):
    if len(sidecar.column) != len(sidecar.kzg_commitments) or len(sidecar.column) != len(
        sidecar.kzg_proofs
    ):
        return False

    return True

@@ -122,7 +124,7 @@ def verify_data_column_sidecar_inclusion_proof(sidecar: DataColumnSidecar) -> bo
    """
    Verify if the given KZG commitments are included in the given beacon block.
    """
    gindex = get_subtree_index(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments'))
    gindex = get_subtree_index(get_generalized_index(BeaconBlockBody, "blob_kzg_commitments"))
    return is_valid_merkle_branch(
        leaf=hash_tree_root(sidecar.kzg_commitments),
        branch=sidecar.kzg_commitments_inclusion_proof,

@@ -56,15 +56,19 @@ def get_extended_sample_count(allowed_failures: uint64) -> uint64:
        M = int(M)
        n = int(n)
        N = int(N)
        return sum([math_comb(n, i) * math_comb(M - n, N - i) / math_comb(M, N)
                    for i in range(k + 1)])
        return sum(
            [math_comb(n, i) * math_comb(M - n, N - i) / math_comb(M, N) for i in range(k + 1)]
        )

    worst_case_missing = NUMBER_OF_COLUMNS // 2 + 1
    false_positive_threshold = hypergeom_cdf(0, NUMBER_OF_COLUMNS,
                                             worst_case_missing, SAMPLES_PER_SLOT)
    false_positive_threshold = hypergeom_cdf(
        0, NUMBER_OF_COLUMNS, worst_case_missing, SAMPLES_PER_SLOT
    )
    for sample_count in range(SAMPLES_PER_SLOT, NUMBER_OF_COLUMNS + 1):
        if hypergeom_cdf(allowed_failures, NUMBER_OF_COLUMNS,
                         worst_case_missing, sample_count) <= false_positive_threshold:
        if (
            hypergeom_cdf(allowed_failures, NUMBER_OF_COLUMNS, worst_case_missing, sample_count)
            <= false_positive_threshold
        ):
            break
    return sample_count
```
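
The sampling logic above leans entirely on the hypergeometric CDF, so a tiny self-contained version (standard library only, toy numbers) may help: it gives the probability of drawing at most `k` unavailable columns when sampling `N` of `M` columns of which `n` are missing.

```python
from math import comb

def hypergeom_cdf(k: int, M: int, n: int, N: int) -> float:
    # P[X <= k] when drawing N columns out of M, n of which are missing.
    return sum(comb(n, i) * comb(M - n, N - i) / comb(M, N) for i in range(k + 1))

# Toy numbers: 16 columns, 9 missing (just over half), 4 samples. The chance
# that all 4 samples dodge the missing columns is small; that false-positive
# probability is exactly what the spec function bounds when picking a count.
p_all_available = hypergeom_cdf(0, 16, 9, 4)
assert 0.0 < p_all_available < 0.05
```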

@@ -139,7 +139,9 @@ def coset_evals_to_cell(coset_evals: CosetEvals) -> Cell:

#### `_fft_field`

```python
def _fft_field(vals: Sequence[BLSFieldElement], roots_of_unity: Sequence[BLSFieldElement]) -> Sequence[BLSFieldElement]:
def _fft_field(
    vals: Sequence[BLSFieldElement], roots_of_unity: Sequence[BLSFieldElement]
) -> Sequence[BLSFieldElement]:
    if len(vals) == 1:
        return vals
    L = _fft_field(vals[::2], roots_of_unity[::2])

@@ -155,13 +157,16 @@ def _fft_field(vals: Sequence[BLSFiel

#### `fft_field`

```python
def fft_field(vals: Sequence[BLSFieldElement],
              roots_of_unity: Sequence[BLSFieldElement],
              inv: bool=False) -> Sequence[BLSFieldElement]:
def fft_field(
    vals: Sequence[BLSFieldElement], roots_of_unity: Sequence[BLSFieldElement], inv: bool = False
) -> Sequence[BLSFieldElement]:
    if inv:
        # Inverse FFT
        invlen = BLSFieldElement(len(vals)).pow(BLSFieldElement(BLS_MODULUS - 2))
        return [x * invlen for x in _fft_field(vals, list(roots_of_unity[0:1]) + list(roots_of_unity[:0:-1]))]
        return [
            x * invlen
            for x in _fft_field(vals, list(roots_of_unity[0:1]) + list(roots_of_unity[:0:-1]))
        ]
    else:
        # Regular FFT
        return _fft_field(vals, roots_of_unity)

@@ -170,9 +175,9 @@ def fft_field(vals: Sequence[BLSFieldElement],

#### `coset_fft_field`

```python
def coset_fft_field(vals: Sequence[BLSFieldElement],
                    roots_of_unity: Sequence[BLSFieldElement],
                    inv: bool=False) -> Sequence[BLSFieldElement]:
def coset_fft_field(
    vals: Sequence[BLSFieldElement], roots_of_unity: Sequence[BLSFieldElement], inv: bool = False
) -> Sequence[BLSFieldElement]:
    """
    Computes an FFT/IFFT over a coset of the roots of unity.
    This is useful for when one wants to divide by a polynomial which

@@ -180,7 +185,9 @@ def coset_fft_field(vals: Sequence[BLSFieldElement],
    """
    vals = [v for v in vals]  # copy

    def shift_vals(vals: Sequence[BLSFieldElement], factor: BLSFieldElement) -> Sequence[BLSFieldElement]:
    def shift_vals(
        vals: Sequence[BLSFieldElement], factor: BLSFieldElement
    ) -> Sequence[BLSFieldElement]:
        """
        Multiply each entry in `vals` by succeeding powers of `factor`
        i.e., [vals[0] * factor^0, vals[1] * factor^1, ..., vals[n] * factor^n]

@@ -206,11 +213,13 @@ def coset_fft_field(vals: Sequence[BLSFieldElement],

#### `compute_verify_cell_kzg_proof_batch_challenge`

```python
def compute_verify_cell_kzg_proof_batch_challenge(commitments: Sequence[KZGCommitment],
                                                  commitment_indices: Sequence[CommitmentIndex],
                                                  cell_indices: Sequence[CellIndex],
                                                  cosets_evals: Sequence[CosetEvals],
                                                  proofs: Sequence[KZGProof]) -> BLSFieldElement:
def compute_verify_cell_kzg_proof_batch_challenge(
    commitments: Sequence[KZGCommitment],
    commitment_indices: Sequence[CommitmentIndex],
    cell_indices: Sequence[CellIndex],
    cosets_evals: Sequence[CosetEvals],
    proofs: Sequence[KZGProof],
) -> BLSFieldElement:
    """
    Compute a random challenge ``r`` used in the universal verification equation. To compute the
    challenge, ``RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN`` and all data that can influence the

@@ -243,7 +252,9 @@ def polynomial_eval_to_coeff(polynomial: Polynomial) -> PolynomialCoeff:
    Interpolates a polynomial (given in evaluation form) to a polynomial in coefficient form.
    """
    roots_of_unity = compute_roots_of_unity(FIELD_ELEMENTS_PER_BLOB)
    return PolynomialCoeff(fft_field(bit_reversal_permutation(polynomial), roots_of_unity, inv=True))
    return PolynomialCoeff(
        fft_field(bit_reversal_permutation(polynomial), roots_of_unity, inv=True)
    )
```

#### `add_polynomialcoeff`

@@ -255,7 +266,9 @@ def add_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoe
    """
    a, b = (a, b) if len(a) >= len(b) else (b, a)
    length_a, length_b = len(a), len(b)
    return PolynomialCoeff([a[i] + (b[i] if i < length_b else BLSFieldElement(0)) for i in range(length_a)])
    return PolynomialCoeff(
        [a[i] + (b[i] if i < length_b else BLSFieldElement(0)) for i in range(length_a)]
    )
```
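
A quick sanity check of `add_polynomialcoeff` over plain ints instead of BLS field elements (illustrative only): the shorter coefficient list is implicitly zero-padded.

```python
def add_poly(a: list, b: list) -> list:
    # Same swap-then-pad logic as add_polynomialcoeff, over ints.
    a, b = (a, b) if len(a) >= len(b) else (b, a)
    return [a[i] + (b[i] if i < len(b) else 0) for i in range(len(a))]

# (1 + 2x + 3x^2) + (5 + 7x) = 6 + 9x + 3x^2
assert add_poly([1, 2, 3], [5, 7]) == [6, 9, 3]
```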

#### `multiply_polynomialcoeff`

@@ -299,7 +312,9 @@ def divide_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> Polynomial

#### `interpolate_polynomialcoeff`

```python
def interpolate_polynomialcoeff(xs: Sequence[BLSFieldElement], ys: Sequence[BLSFieldElement]) -> PolynomialCoeff:
def interpolate_polynomialcoeff(
    xs: Sequence[BLSFieldElement], ys: Sequence[BLSFieldElement]
) -> PolynomialCoeff:
    """
    Lagrange interpolation: Finds the lowest degree polynomial that takes the value ``ys[i]`` at ``x[i]`` for all i.
    Outputs a coefficient form polynomial. Leading coefficients may be zero.

@@ -335,7 +350,9 @@ def vanishing_polynomialcoeff(xs: Sequence[BLSFieldElement]) -> PolynomialCoeff:

#### `evaluate_polynomialcoeff`

```python
def evaluate_polynomialcoeff(polynomial_coeff: PolynomialCoeff, z: BLSFieldElement) -> BLSFieldElement:
def evaluate_polynomialcoeff(
    polynomial_coeff: PolynomialCoeff, z: BLSFieldElement
) -> BLSFieldElement:
    """
    Evaluate a coefficient form polynomial at ``z`` using Horner's schema.
    """

@@ -353,8 +370,8 @@ Extended KZG functions for multiproofs

```python
def compute_kzg_proof_multi_impl(
    polynomial_coeff: PolynomialCoeff,
    zs: Coset) -> Tuple[KZGProof, CosetEvals]:
    polynomial_coeff: PolynomialCoeff, zs: Coset
) -> Tuple[KZGProof, CosetEvals]:
    """
    Compute a KZG multi-evaluation proof for a set of `k` points.

@@ -377,17 +394,21 @@ def compute_kzg_proof_multi_impl(
    # Compute the quotient polynomial directly in monomial form
    quotient_polynomial = divide_polynomialcoeff(polynomial_coeff, denominator_poly)

    return KZGProof(g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(quotient_polynomial)], quotient_polynomial)), ys
    return KZGProof(
        g1_lincomb(KZG_SETUP_G1_MONOMIAL[: len(quotient_polynomial)], quotient_polynomial)
    ), ys
```

#### `verify_cell_kzg_proof_batch_impl`

```python
def verify_cell_kzg_proof_batch_impl(commitments: Sequence[KZGCommitment],
                                     commitment_indices: Sequence[CommitmentIndex],
                                     cell_indices: Sequence[CellIndex],
                                     cosets_evals: Sequence[CosetEvals],
                                     proofs: Sequence[KZGProof]) -> bool:
def verify_cell_kzg_proof_batch_impl(
    commitments: Sequence[KZGCommitment],
    commitment_indices: Sequence[CommitmentIndex],
    cell_indices: Sequence[CellIndex],
    cosets_evals: Sequence[CosetEvals],
    proofs: Sequence[KZGProof],
) -> bool:
    """
    Helper: Verify that a set of cells belong to their corresponding commitment.

@@ -429,11 +450,7 @@ def verify_cell_kzg_proof_batch_impl(commitments: Sequence[KZGCommitment],

    # Step 1: Compute a challenge r and its powers r^0, ..., r^{num_cells-1}
    r = compute_verify_cell_kzg_proof_batch_challenge(
        commitments,
        commitment_indices,
        cell_indices,
        cosets_evals,
        proofs
        commitments, commitment_indices, cell_indices, cosets_evals, proofs
    )
    r_powers = compute_powers(r, num_cells)

@@ -458,9 +475,15 @@ def verify_cell_kzg_proof_batch_impl(commitments: Sequence[KZGCommitment],
    # Note: an efficient implementation would use the IDFT based method explained in the blog post
    sum_interp_polys_coeff = PolynomialCoeff([BLSFieldElement(0)] * n)
    for k in range(num_cells):
        interp_poly_coeff = interpolate_polynomialcoeff(coset_for_cell(cell_indices[k]), cosets_evals[k])
        interp_poly_scaled_coeff = multiply_polynomialcoeff(PolynomialCoeff([r_powers[k]]), interp_poly_coeff)
        sum_interp_polys_coeff = add_polynomialcoeff(sum_interp_polys_coeff, interp_poly_scaled_coeff)
        interp_poly_coeff = interpolate_polynomialcoeff(
            coset_for_cell(cell_indices[k]), cosets_evals[k]
        )
        interp_poly_scaled_coeff = multiply_polynomialcoeff(
            PolynomialCoeff([r_powers[k]]), interp_poly_coeff
        )
        sum_interp_polys_coeff = add_polynomialcoeff(
            sum_interp_polys_coeff, interp_poly_scaled_coeff
        )
    rli = bls.bytes48_to_G1(g1_lincomb(KZG_SETUP_G1_MONOMIAL[:n], sum_interp_polys_coeff))

    # Step 4.3: Compute RLP = sum_k (r^k * h_k^n) proofs[k]

@@ -477,10 +500,12 @@ def verify_cell_kzg_proof_batch_impl(commitments: Sequence[KZGCommitment],
        rl = bls.add(rl, rlp)

    # Step 5: Check pairing (LL, LR) = pairing (RL, [1])
    return (bls.pairing_check([
        [ll, lr],
        [rl, bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[0]))],
    ]))
    return bls.pairing_check(
        [
            [ll, lr],
            [rl, bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[0]))],
        ]
    )
```

### Cell cosets

@@ -518,7 +543,11 @@ def coset_for_cell(cell_index: CellIndex) -> Coset:
    roots_of_unity_brp = bit_reversal_permutation(
        compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB)
    )
    return Coset(roots_of_unity_brp[FIELD_ELEMENTS_PER_CELL * cell_index:FIELD_ELEMENTS_PER_CELL * (cell_index + 1)])
    return Coset(
        roots_of_unity_brp[
            FIELD_ELEMENTS_PER_CELL * cell_index : FIELD_ELEMENTS_PER_CELL * (cell_index + 1)
        ]
    )
```
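
The slicing in `coset_for_cell` is just a strided view into the bit-reversed domain. With toy sizes (ints standing in for roots of unity):

```python
# Toy sizes for illustration; the spec uses much larger domains.
FIELD_ELEMENTS_PER_CELL = 4
roots_of_unity_brp = list(range(16))  # stand-in for the bit-reversed roots
cell_index = 2

coset = roots_of_unity_brp[
    FIELD_ELEMENTS_PER_CELL * cell_index : FIELD_ELEMENTS_PER_CELL * (cell_index + 1)
]
assert coset == [8, 9, 10, 11]  # each cell owns one contiguous slice
```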

## Cells

@@ -550,9 +579,9 @@ def compute_cells(blob: Blob) -> Vector[Cell, CELLS_PER_EXT_BLOB]:

#### `compute_cells_and_kzg_proofs_polynomialcoeff`

```python
def compute_cells_and_kzg_proofs_polynomialcoeff(polynomial_coeff: PolynomialCoeff) -> Tuple[
        Vector[Cell, CELLS_PER_EXT_BLOB],
        Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
def compute_cells_and_kzg_proofs_polynomialcoeff(
    polynomial_coeff: PolynomialCoeff,
) -> Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
    """
    Helper function which computes cells/proofs for a polynomial in coefficient form.
    """

@@ -568,9 +597,9 @@ def compute_cells_and_kzg_proofs_polynomialCoe

#### `compute_cells_and_kzg_proofs`

```python
def compute_cells_and_kzg_proofs(blob: Blob) -> Tuple[
        Vector[Cell, CELLS_PER_EXT_BLOB],
        Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
def compute_cells_and_kzg_proofs(
    blob: Blob,
) -> Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
    """
    Compute all the cell proofs for an extended blob. This is an inefficient O(n^2) algorithm,
    for performant implementation the FK20 algorithm that runs in O(n log n) should be

@@ -590,10 +619,12 @@ def compute_cells_and_kzg_proofs(blob: Blob) -> Tuple[

#### `verify_cell_kzg_proof_batch`

```python
def verify_cell_kzg_proof_batch(commitments_bytes: Sequence[Bytes48],
                                cell_indices: Sequence[CellIndex],
                                cells: Sequence[Cell],
                                proofs_bytes: Sequence[Bytes48]) -> bool:
def verify_cell_kzg_proof_batch(
    commitments_bytes: Sequence[Bytes48],
    cell_indices: Sequence[CellIndex],
    cells: Sequence[Cell],
    proofs_bytes: Sequence[Bytes48],
) -> bool:
    """
    Verify that a set of cells belong to their corresponding commitments.

@@ -618,22 +649,22 @@ def verify_cell_kzg_proof_batch(commitments_bytes: Sequence[Bytes48],
        assert len(proof_bytes) == BYTES_PER_PROOF

    # Create the list of deduplicated commitments we are dealing with
    deduplicated_commitments = [bytes_to_kzg_commitment(commitment_bytes)
                                for commitment_bytes in set(commitments_bytes)]
    deduplicated_commitments = [
        bytes_to_kzg_commitment(commitment_bytes) for commitment_bytes in set(commitments_bytes)
    ]
    # Create indices list mapping initial commitments (that may contain duplicates) to the deduplicated commitments
    commitment_indices = [CommitmentIndex(deduplicated_commitments.index(commitment_bytes))
                          for commitment_bytes in commitments_bytes]
    commitment_indices = [
        CommitmentIndex(deduplicated_commitments.index(commitment_bytes))
        for commitment_bytes in commitments_bytes
    ]

    cosets_evals = [cell_to_coset_evals(cell) for cell in cells]
    proofs = [bytes_to_kzg_proof(proof_bytes) for proof_bytes in proofs_bytes]

    # Do the actual verification
    return verify_cell_kzg_proof_batch_impl(
        deduplicated_commitments,
        commitment_indices,
        cell_indices,
        cosets_evals,
        proofs)
        deduplicated_commitments, commitment_indices, cell_indices, cosets_evals, proofs
    )
```
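
The deduplication step above is worth isolating, since the verifier converts each distinct commitment only once. A self-contained sketch with stand-in byte strings:

```python
# Stand-in byte strings for 48-byte commitments (illustrative only).
commitments_bytes = [b"C1", b"C2", b"C1", b"C1"]

deduplicated = list(set(commitments_bytes))
commitment_indices = [deduplicated.index(c) for c in commitments_bytes]

# All three occurrences of b"C1" map to the same deduplicated entry.
assert deduplicated[commitment_indices[0]] == b"C1"
assert commitment_indices[0] == commitment_indices[2] == commitment_indices[3]
```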
|
||||
|
||||
## Reconstruction
|
||||
@@ -641,7 +672,9 @@ def verify_cell_kzg_proof_batch(commitments_bytes: Sequence[Bytes48],
|
||||
### `construct_vanishing_polynomial`
|
||||
|
||||
```python
|
||||
def construct_vanishing_polynomial(missing_cell_indices: Sequence[CellIndex]) -> Sequence[BLSFieldElement]:
|
||||
def construct_vanishing_polynomial(
|
||||
missing_cell_indices: Sequence[CellIndex],
|
||||
) -> Sequence[BLSFieldElement]:
|
||||
"""
|
||||
Given the cells indices that are missing from the data, compute the polynomial that vanishes at every point that
|
||||
corresponds to a missing field element.
|
||||
@@ -656,10 +689,12 @@ def construct_vanishing_polynomial(missing_cell_indices: Sequence[CellIndex]) ->
|
||||
roots_of_unity_reduced = compute_roots_of_unity(CELLS_PER_EXT_BLOB)
|
||||
|
||||
# Compute polynomial that vanishes at all the missing cells (over the small domain)
|
||||
short_zero_poly = vanishing_polynomialcoeff([
|
||||
roots_of_unity_reduced[reverse_bits(missing_cell_index, CELLS_PER_EXT_BLOB)]
|
||||
for missing_cell_index in missing_cell_indices
|
||||
])
|
||||
short_zero_poly = vanishing_polynomialcoeff(
|
||||
[
|
||||
roots_of_unity_reduced[reverse_bits(missing_cell_index, CELLS_PER_EXT_BLOB)]
|
||||
for missing_cell_index in missing_cell_indices
|
||||
]
|
||||
)
|
||||
|
||||
# Extend vanishing polynomial to full domain using the closed form of the vanishing polynomial over a coset
|
||||
zero_poly_coeff = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_EXT_BLOB
|
||||
@@ -672,8 +707,9 @@ def construct_vanishing_polynomial(missing_cell_indices: Sequence[CellIndex]) ->
|
||||
### `recover_polynomialcoeff`
|
||||
|
||||
```python
|
||||
def recover_polynomialcoeff(cell_indices: Sequence[CellIndex],
|
||||
cosets_evals: Sequence[CosetEvals]) -> PolynomialCoeff:
|
||||
def recover_polynomialcoeff(
|
||||
cell_indices: Sequence[CellIndex], cosets_evals: Sequence[CosetEvals]
|
||||
) -> PolynomialCoeff:
|
||||
"""
|
||||
Recover the polynomial in coefficient form that when evaluated at the roots of unity will give the extended blob.
|
||||
"""
|
||||
@@ -693,8 +729,11 @@ def recover_polynomialcoeff(cell_indices: Sequence[CellIndex],
|
||||
|
||||
# Compute the vanishing polynomial Z(x) in coefficient form.
|
||||
# Z(x) is the polynomial which vanishes on all of the evaluations which are missing.
|
||||
missing_cell_indices = [CellIndex(cell_index) for cell_index in range(CELLS_PER_EXT_BLOB)
|
||||
if cell_index not in cell_indices]
|
||||
missing_cell_indices = [
|
||||
CellIndex(cell_index)
|
||||
for cell_index in range(CELLS_PER_EXT_BLOB)
|
||||
if cell_index not in cell_indices
|
||||
]
|
||||
zero_poly_coeff = construct_vanishing_polynomial(missing_cell_indices)
|
||||
|
||||
# Convert Z(x) to evaluation form over the FFT domain
|
||||
@@ -709,22 +748,30 @@ def recover_polynomialcoeff(cell_indices: Sequence[CellIndex],
|
||||
# and we know that (P*Z)(x) has degree at most FIELD_ELEMENTS_PER_EXT_BLOB - 1.
|
||||
# Thus, an inverse FFT of the evaluations of (E*Z)(x) (= evaluations of (P*Z)(x))
|
||||
# yields the coefficient form of (P*Z)(x).
|
||||
extended_evaluation_times_zero_coeffs = fft_field(extended_evaluation_times_zero, roots_of_unity_extended, inv=True)
|
||||
extended_evaluation_times_zero_coeffs = fft_field(
|
||||
extended_evaluation_times_zero, roots_of_unity_extended, inv=True
|
||||
)
|
||||
|
||||
# Next step is to divide the polynomial (P*Z)(x) by polynomial Z(x) to get P(x).
|
||||
# We do this in evaluation form over a coset of the FFT domain to avoid division by 0.
|
||||
|
||||
# Convert (P*Z)(x) to evaluation form over a coset of the FFT domain
|
||||
extended_evaluations_over_coset = coset_fft_field(extended_evaluation_times_zero_coeffs, roots_of_unity_extended)
|
||||
extended_evaluations_over_coset = coset_fft_field(
|
||||
extended_evaluation_times_zero_coeffs, roots_of_unity_extended
|
||||
)
|
||||
|
||||
# Convert Z(x) to evaluation form over a coset of the FFT domain
|
||||
zero_poly_over_coset = coset_fft_field(zero_poly_coeff, roots_of_unity_extended)
|
||||
|
||||
# Compute P(x) = (P*Z)(x) / Z(x) in evaluation form over a coset of the FFT domain
|
||||
reconstructed_poly_over_coset = [a / b for a, b in zip(extended_evaluations_over_coset, zero_poly_over_coset)]
|
||||
reconstructed_poly_over_coset = [
|
||||
a / b for a, b in zip(extended_evaluations_over_coset, zero_poly_over_coset)
|
||||
]
|
||||
|
||||
# Convert P(x) to coefficient form
|
||||
reconstructed_poly_coeff = coset_fft_field(reconstructed_poly_over_coset, roots_of_unity_extended, inv=True)
|
||||
reconstructed_poly_coeff = coset_fft_field(
|
||||
reconstructed_poly_over_coset, roots_of_unity_extended, inv=True
|
||||
)
|
||||
|
||||
return PolynomialCoeff(reconstructed_poly_coeff[:FIELD_ELEMENTS_PER_BLOB])
|
||||
```
@@ -732,10 +779,9 @@ def recover_polynomialcoeff(cell_indices: Sequence[CellIndex],

### `recover_cells_and_kzg_proofs`

```python
def recover_cells_and_kzg_proofs(cell_indices: Sequence[CellIndex],
                                 cells: Sequence[Cell]) -> Tuple[
        Vector[Cell, CELLS_PER_EXT_BLOB],
        Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
def recover_cells_and_kzg_proofs(
    cell_indices: Sequence[CellIndex], cells: Sequence[Cell]
) -> Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
    """
    Given at least 50% of cells for a blob, recover all the cells/proofs.
    This algorithm uses FFTs to recover cells faster than using Lagrange

@@ -119,8 +119,12 @@ slot, with a minimum of `VALIDATOR_CUSTODY_REQUIREMENT` and of course a maximum
of `NUMBER_OF_CUSTODY_GROUPS`.

```python
def get_validators_custody_requirement(state: BeaconState, validator_indices: Sequence[ValidatorIndex]) -> uint64:
    total_node_balance = sum(state.validators[index].effective_balance for index in validator_indices)
def get_validators_custody_requirement(
    state: BeaconState, validator_indices: Sequence[ValidatorIndex]
) -> uint64:
    total_node_balance = sum(
        state.validators[index].effective_balance for index in validator_indices
    )
    count = total_node_balance // BALANCE_PER_ADDITIONAL_CUSTODY_GROUP
    return min(max(count, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS)
```
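
As a quick sanity check of the clamp logic, here is the same arithmetic in isolation. The constant values below are hypothetical stand-ins chosen only to make the numbers visible, not the normative configuration.

```python
# Hypothetical constants for illustration only
VALIDATOR_CUSTODY_REQUIREMENT = 8
NUMBER_OF_CUSTODY_GROUPS = 128
BALANCE_PER_ADDITIONAL_CUSTODY_GROUP = 32_000_000_000  # Gwei

def custody_requirement(effective_balances_gwei):
    total_node_balance = sum(effective_balances_gwei)
    count = total_node_balance // BALANCE_PER_ADDITIONAL_CUSTODY_GROUP
    return min(max(count, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS)

assert custody_requirement([32_000_000_000]) == 8          # floored at the minimum
assert custody_requirement([32_000_000_000] * 20) == 20    # scales with balance
assert custody_requirement([32_000_000_000] * 999) == 128  # capped at the maximum
```
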
@@ -188,10 +192,9 @@ def get_data_column_sidecars(
    signed_block_header: SignedBeaconBlockHeader,
    kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
    kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH],
    cells_and_kzg_proofs: Sequence[Tuple[
        Vector[Cell, CELLS_PER_EXT_BLOB],
        Vector[KZGProof, CELLS_PER_EXT_BLOB]
    ]]
    cells_and_kzg_proofs: Sequence[
        Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]
    ],
) -> Sequence[DataColumnSidecar]:
    """
    Given a signed block header and the commitments, inclusion proof, cells/proofs associated with
@@ -205,14 +208,16 @@ def get_data_column_sidecars(
        for cells, proofs in cells_and_kzg_proofs:
            column_cells.append(cells[column_index])
            column_proofs.append(proofs[column_index])
        sidecars.append(DataColumnSidecar(
            index=column_index,
            column=column_cells,
            kzg_commitments=kzg_commitments,
            kzg_proofs=column_proofs,
            signed_block_header=signed_block_header,
            kzg_commitments_inclusion_proof=kzg_commitments_inclusion_proof,
        ))
        sidecars.append(
            DataColumnSidecar(
                index=column_index,
                column=column_cells,
                kzg_commitments=kzg_commitments,
                kzg_proofs=column_proofs,
                signed_block_header=signed_block_header,
                kzg_commitments_inclusion_proof=kzg_commitments_inclusion_proof,
            )
        )
    return sidecars
```

@@ -221,10 +226,9 @@ def get_data_column_sidecars(
```python
def get_data_column_sidecars_from_block(
    signed_block: SignedBeaconBlock,
    cells_and_kzg_proofs: Sequence[Tuple[
        Vector[Cell, CELLS_PER_EXT_BLOB],
        Vector[KZGProof, CELLS_PER_EXT_BLOB]
    ]]
    cells_and_kzg_proofs: Sequence[
        Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]
    ],
) -> Sequence[DataColumnSidecar]:
    """
    Given a signed block and the cells/proofs associated with each blob in the
@@ -234,13 +238,13 @@ def get_data_column_sidecars_from_block(
    signed_block_header = compute_signed_block_header(signed_block)
    kzg_commitments_inclusion_proof = compute_merkle_proof(
        signed_block.message.body,
        get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments'),
        get_generalized_index(BeaconBlockBody, "blob_kzg_commitments"),
    )
    return get_data_column_sidecars(
        signed_block_header,
        blob_kzg_commitments,
        kzg_commitments_inclusion_proof,
        cells_and_kzg_proofs
        cells_and_kzg_proofs,
    )
```

@@ -249,10 +253,9 @@ def get_data_column_sidecars_from_block(
```python
def get_data_column_sidecars_from_column_sidecar(
    sidecar: DataColumnSidecar,
    cells_and_kzg_proofs: Sequence[Tuple[
        Vector[Cell, CELLS_PER_EXT_BLOB],
        Vector[KZGProof, CELLS_PER_EXT_BLOB]
    ]]
    cells_and_kzg_proofs: Sequence[
        Tuple[Vector[Cell, CELLS_PER_EXT_BLOB], Vector[KZGProof, CELLS_PER_EXT_BLOB]]
    ],
) -> Sequence[DataColumnSidecar]:
    """
    Given a DataColumnSidecar and the cells/proofs associated with each blob corresponding
@@ -264,7 +267,7 @@ def get_data_column_sidecars_from_column_sidecar(
        sidecar.signed_block_header,
        sidecar.kzg_commitments,
        sidecar.kzg_commitments_inclusion_proof,
        cells_and_kzg_proofs
        cells_and_kzg_proofs,
    )
```


@@ -748,7 +748,9 @@ def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
    """
    Check if ``validator`` is slashable.
    """
    return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
    return (not validator.slashed) and (
        validator.activation_epoch <= epoch < validator.withdrawable_epoch
    )
```

#### `is_slashable_attestation_data`
@@ -760,7 +762,8 @@ def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationDa
    """
    return (
        # Double vote
        (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) or
        (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch)
        or
        # Surround vote
        (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
    )
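
The reformatted condition reads more clearly as two named cases. A self-contained check of the same logic, with a plain named tuple of checkpoint epochs standing in for `AttestationData` (the spec compares the full containers; this sketch only keeps the epochs):

```python
from typing import NamedTuple

class Vote(NamedTuple):
    source: int  # source checkpoint epoch
    target: int  # target checkpoint epoch

def is_slashable(v1: Vote, v2: Vote) -> bool:
    double = v1 != v2 and v1.target == v2.target
    surround = v1.source < v2.source and v2.target < v1.target
    return double or surround

assert is_slashable(Vote(2, 3), Vote(1, 3))      # double vote for epoch 3
assert is_slashable(Vote(1, 5), Vote(2, 3))      # (1, 5) surrounds (2, 3)
assert not is_slashable(Vote(1, 2), Vote(2, 3))  # honest consecutive votes
```
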

#### `is_valid_indexed_attestation`

```python
def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
def is_valid_indexed_attestation(
    state: BeaconState, indexed_attestation: IndexedAttestation
) -> bool:
    """
    Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature.
    """
@@ -787,7 +792,9 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
#### `is_valid_merkle_branch`

```python
def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root) -> bool:
def is_valid_merkle_branch(
    leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root
) -> bool:
    """
    Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``.
    """
@@ -818,9 +825,7 @@ def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) ->
        flip = (pivot + index_count - index) % index_count
        position = max(index, flip)
        source = hash(
            seed
            + uint_to_bytes(uint8(current_round))
            + uint_to_bytes(uint32(position // 256))
            seed + uint_to_bytes(uint8(current_round)) + uint_to_bytes(uint32(position // 256))
        )
        byte = uint8(source[(position % 256) // 8])
        bit = (byte >> (position % 8)) % 2
@@ -832,7 +837,9 @@ def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) ->
#### `compute_proposer_index`

```python
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
def compute_proposer_index(
    state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32
) -> ValidatorIndex:
    """
    Return from ``indices`` a random index sampled by effective balance.
    """
@@ -852,16 +859,18 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
#### `compute_committee`

```python
def compute_committee(indices: Sequence[ValidatorIndex],
                      seed: Bytes32,
                      index: uint64,
                      count: uint64) -> Sequence[ValidatorIndex]:
def compute_committee(
    indices: Sequence[ValidatorIndex], seed: Bytes32, index: uint64, count: uint64
) -> Sequence[ValidatorIndex]:
    """
    Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``.
    """
    start = (len(indices) * index) // count
    end = (len(indices) * uint64(index + 1)) // count
    return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
    return [
        indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)]
        for i in range(start, end)
    ]
```
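
Worth noting how the slicing distributes validators: committees are contiguous slices of the shuffled index list, differing in size by at most one. A toy version with the shuffle replaced by the identity, an assumption made purely for readability:

```python
def toy_compute_committee(indices, index, count):
    start = (len(indices) * index) // count
    end = (len(indices) * (index + 1)) // count
    # The spec maps each i through compute_shuffled_index here
    return [indices[i] for i in range(start, end)]

validators = list(range(10))
committees = [toy_compute_committee(validators, i, 3) for i in range(3)]
assert committees == [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]  # sizes 3, 3, 4
```
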

#### `compute_epoch_at_slot`
@@ -902,10 +911,12 @@ def compute_fork_data_root(current_version: Version, genesis_validators_root: Ro
    Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``.
    This is used primarily in signature domains to avoid collisions across forks/chains.
    """
    return hash_tree_root(ForkData(
        current_version=current_version,
        genesis_validators_root=genesis_validators_root,
    ))
    return hash_tree_root(
        ForkData(
            current_version=current_version,
            genesis_validators_root=genesis_validators_root,
        )
    )
```

#### `compute_fork_digest`
@@ -923,7 +934,9 @@ def compute_fork_digest(current_version: Version, genesis_validators_root: Root)
#### `compute_domain`

```python
def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_validators_root: Root=None) -> Domain:
def compute_domain(
    domain_type: DomainType, fork_version: Version = None, genesis_validators_root: Root = None
) -> Domain:
    """
    Return the domain for the ``domain_type`` and ``fork_version``.
    """
@@ -942,10 +955,12 @@ def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root:
    """
    Return the signing root for the corresponding signing data.
    """
    return hash_tree_root(SigningData(
        object_root=hash_tree_root(ssz_object),
        domain=domain,
    ))
    return hash_tree_root(
        SigningData(
            object_root=hash_tree_root(ssz_object),
            domain=domain,
        )
    )
```

### Beacon state accessors
@@ -1009,7 +1024,9 @@ def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[V
    """
    Return the sequence of active validator indices at ``epoch``.
    """
    return [ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)]
    return [
        ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)
    ]
```

#### `get_validator_churn_limit`
@@ -1020,7 +1037,9 @@ def get_validator_churn_limit(state: BeaconState) -> uint64:
    Return the validator churn limit for the current epoch.
    """
    active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
    return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT)
    return max(
        MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT
    )
```

#### `get_seed`
@@ -1030,7 +1049,9 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes
    """
    Return the seed at ``epoch``.
    """
    mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1))  # Avoid underflow
    mix = get_randao_mix(
        state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)
    )  # Avoid underflow
    return hash(domain_type + uint_to_bytes(epoch) + mix)
```

@@ -1041,16 +1062,23 @@ def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64:
    """
    Return the number of committees in each slot for the given ``epoch``.
    """
    return max(uint64(1), min(
        MAX_COMMITTEES_PER_SLOT,
        uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
    ))
    return max(
        uint64(1),
        min(
            MAX_COMMITTEES_PER_SLOT,
            uint64(len(get_active_validator_indices(state, epoch)))
            // SLOTS_PER_EPOCH
            // TARGET_COMMITTEE_SIZE,
        ),
    )
```
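
The clamp above in numbers, with hypothetical constants (the normative values live in the preset files):

```python
MAX_COMMITTEES_PER_SLOT = 64
SLOTS_PER_EPOCH = 32
TARGET_COMMITTEE_SIZE = 128

def committees_per_slot(active_validators: int) -> int:
    return max(1, min(
        MAX_COMMITTEES_PER_SLOT,
        active_validators // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
    ))

assert committees_per_slot(1_000) == 1        # small sets floor at one committee
assert committees_per_slot(100_000) == 24     # 100_000 // 32 // 128
assert committees_per_slot(1_000_000) == 64   # large sets hit the cap
```
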

#### `get_beacon_committee`

```python
def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]:
def get_beacon_committee(
    state: BeaconState, slot: Slot, index: CommitteeIndex
) -> Sequence[ValidatorIndex]:
    """
    Return the beacon committee at ``slot`` for ``index``.
    """
@@ -1086,7 +1114,12 @@ def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei:
    ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
    Math safe up to ~10B ETH, after which this overflows uint64.
    """
    return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
    return Gwei(
        max(
            EFFECTIVE_BALANCE_INCREMENT,
            sum([state.validators[index].effective_balance for index in indices]),
        )
    )
```

#### `get_total_active_balance`
@@ -1097,18 +1130,22 @@ def get_total_active_balance(state: BeaconState) -> Gwei:
    Return the combined effective balance of the active validators.
    Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
    """
    return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state))))
    return get_total_balance(
        state, set(get_active_validator_indices(state, get_current_epoch(state)))
    )
```

#### `get_domain`

```python
def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch=None) -> Domain:
def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch = None) -> Domain:
    """
    Return the signature domain (fork version concatenated with domain type) of a message.
    """
    epoch = get_current_epoch(state) if epoch is None else epoch
    fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
    fork_version = (
        state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
    )
    return compute_domain(domain_type, fork_version, state.genesis_validators_root)
```

@@ -1188,9 +1225,9 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
#### `slash_validator`

```python
def slash_validator(state: BeaconState,
                    slashed_index: ValidatorIndex,
                    whistleblower_index: ValidatorIndex=None) -> None:
def slash_validator(
    state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex = None
) -> None:
    """
    Slash the validator with index ``slashed_index``.
    """
@@ -1198,9 +1235,13 @@ def slash_validator(state: BeaconState,
    initiate_validator_exit(state, slashed_index)
    validator = state.validators[slashed_index]
    validator.slashed = True
    validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
    validator.withdrawable_epoch = max(
        validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR)
    )
    state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
    decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT)
    decrease_balance(
        state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT
    )

    # Apply proposer and whistleblower rewards
    proposer_index = get_beacon_proposer_index(state)
@@ -1233,9 +1274,9 @@ Due to this constraint, if
configured to avoid this case.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit]) -> BeaconState:
def initialize_beacon_state_from_eth1(
    eth1_block_hash: Hash32, eth1_timestamp: uint64, deposits: Sequence[Deposit]
) -> BeaconState:
    fork = Fork(
        previous_version=GENESIS_FORK_VERSION,
        current_version=GENESIS_FORK_VERSION,
@@ -1246,20 +1287,23 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
        randao_mixes=[eth1_block_hash]
        * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[: index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        validator.effective_balance = min(
            balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE
        )
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH
@@ -1300,7 +1344,9 @@ out-of-range list access) are considered invalid. State transitions that cause a
`uint64` overflow or underflow are also considered invalid.

```python
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None:
def state_transition(
    state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool = True
) -> None:
    block = signed_block.message
    # Process slots (including those with no blocks) since block
    process_slots(state, block.slot)
@@ -1317,7 +1363,9 @@ def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, valida
```python
def verify_block_signature(state: BeaconState, signed_block: SignedBeaconBlock) -> bool:
    proposer = state.validators[signed_block.message.proposer_index]
    signing_root = compute_signing_root(signed_block.message, get_domain(state, DOMAIN_BEACON_PROPOSER))
    signing_root = compute_signing_root(
        signed_block.message, get_domain(state, DOMAIN_BEACON_PROPOSER)
    )
    return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
```

@@ -1364,30 +1412,43 @@ def process_epoch(state: BeaconState) -> None:
#### Helper functions

```python
def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
def get_matching_source_attestations(
    state: BeaconState, epoch: Epoch
) -> Sequence[PendingAttestation]:
    assert epoch in (get_previous_epoch(state), get_current_epoch(state))
    return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations
    return (
        state.current_epoch_attestations
        if epoch == get_current_epoch(state)
        else state.previous_epoch_attestations
    )
```

```python
def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
def get_matching_target_attestations(
    state: BeaconState, epoch: Epoch
) -> Sequence[PendingAttestation]:
    return [
        a for a in get_matching_source_attestations(state, epoch)
        a
        for a in get_matching_source_attestations(state, epoch)
        if a.data.target.root == get_block_root(state, epoch)
    ]
```

```python
def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
def get_matching_head_attestations(
    state: BeaconState, epoch: Epoch
) -> Sequence[PendingAttestation]:
    return [
        a for a in get_matching_target_attestations(state, epoch)
        a
        for a in get_matching_target_attestations(state, epoch)
        if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot)
    ]
```

```python
def get_unslashed_attesting_indices(state: BeaconState,
                                    attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]:
def get_unslashed_attesting_indices(
    state: BeaconState, attestations: Sequence[PendingAttestation]
) -> Set[ValidatorIndex]:
    output: Set[ValidatorIndex] = set()
    for a in attestations:
        output = output.union(get_attesting_indices(state, a))
@@ -1416,14 +1477,18 @@ def process_justification_and_finalization(state: BeaconState) -> None:
    total_active_balance = get_total_active_balance(state)
    previous_target_balance = get_attesting_balance(state, previous_attestations)
    current_target_balance = get_attesting_balance(state, current_attestations)
    weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance)
    weigh_justification_and_finalization(
        state, total_active_balance, previous_target_balance, current_target_balance
    )
```

```python
def weigh_justification_and_finalization(state: BeaconState,
                                         total_active_balance: Gwei,
                                         previous_epoch_target_balance: Gwei,
                                         current_epoch_target_balance: Gwei) -> None:
def weigh_justification_and_finalization(
    state: BeaconState,
    total_active_balance: Gwei,
    previous_epoch_target_balance: Gwei,
    current_epoch_target_balance: Gwei,
) -> None:
    previous_epoch = get_previous_epoch(state)
    current_epoch = get_current_epoch(state)
    old_previous_justified_checkpoint = state.previous_justified_checkpoint
@@ -1431,15 +1496,17 @@ def weigh_justification_and_finalization(state: BeaconState,

    # Process justifications
    state.previous_justified_checkpoint = state.current_justified_checkpoint
    state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
    state.justification_bits[1:] = state.justification_bits[: JUSTIFICATION_BITS_LENGTH - 1]
    state.justification_bits[0] = 0b0
    if previous_epoch_target_balance * 3 >= total_active_balance * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
                                                        root=get_block_root(state, previous_epoch))
        state.current_justified_checkpoint = Checkpoint(
            epoch=previous_epoch, root=get_block_root(state, previous_epoch)
        )
        state.justification_bits[1] = 0b1
    if current_epoch_target_balance * 3 >= total_active_balance * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
                                                        root=get_block_root(state, current_epoch))
        state.current_justified_checkpoint = Checkpoint(
            epoch=current_epoch, root=get_block_root(state, current_epoch)
        )
        state.justification_bits[0] = 0b1

    # Process finalizations
@@ -1466,7 +1533,12 @@ def weigh_justification_and_finalization(state: BeaconState,
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
    total_balance = get_total_active_balance(state)
    effective_balance = state.validators[index].effective_balance
    return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH)
    return Gwei(
        effective_balance
        * BASE_REWARD_FACTOR
        // integer_squareroot(total_balance)
        // BASE_REWARDS_PER_EPOCH
    )
```
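
To see the magnitude this formula produces, a worked example with hypothetical constants and balances (Gwei throughout; `math.isqrt` plays the role of `integer_squareroot`):

```python
from math import isqrt

BASE_REWARD_FACTOR = 64     # illustrative constant
BASE_REWARDS_PER_EPOCH = 4  # illustrative constant

effective_balance = 32_000_000_000  # one 32 ETH validator
total_balance = 10_000_000 * 10**9  # assume 10M ETH staked in total

base_reward = (effective_balance * BASE_REWARD_FACTOR
               // isqrt(total_balance) // BASE_REWARDS_PER_EPOCH)
# isqrt(10**16) == 10**8, so this is 32e9 * 64 // 1e8 // 4 == 5120 Gwei
assert base_reward == 5120
```
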

```python
@@ -1488,15 +1560,17 @@ def is_in_inactivity_leak(state: BeaconState) -> bool:
def get_eligible_validator_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
    previous_epoch = get_previous_epoch(state)
    return [
        ValidatorIndex(index) for index, v in enumerate(state.validators)
        if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
        ValidatorIndex(index)
        for index, v in enumerate(state.validators)
        if is_active_validator(v, previous_epoch)
        or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
    ]
```

```python
def get_attestation_component_deltas(state: BeaconState,
                                     attestations: Sequence[PendingAttestation]
                                     ) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
def get_attestation_component_deltas(
    state: BeaconState, attestations: Sequence[PendingAttestation]
) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Helper with shared logic for use by get source, target, and head deltas functions
    """
@@ -1527,7 +1601,9 @@ def get_source_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei
    """
    Return attester micro-rewards/penalties for source-vote for each validator.
    """
    matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state))
    matching_source_attestations = get_matching_source_attestations(
        state, get_previous_epoch(state)
    )
    return get_attestation_component_deltas(state, matching_source_attestations)
```

@@ -1536,7 +1612,9 @@ def get_target_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei
    """
    Return attester micro-rewards/penalties for target-vote for each validator.
    """
    matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
    matching_target_attestations = get_matching_target_attestations(
        state, get_previous_epoch(state)
    )
    return get_attestation_component_deltas(state, matching_target_attestations)
```

@@ -1555,14 +1633,18 @@ def get_inclusion_delay_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequ
    Return proposer and inclusion delay micro-rewards/penalties for each validator.
    """
    rewards = [Gwei(0) for _ in range(len(state.validators))]
    matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state))
    matching_source_attestations = get_matching_source_attestations(
        state, get_previous_epoch(state)
    )
    for index in get_unslashed_attesting_indices(state, matching_source_attestations):
        attestation = min([
            a for a in matching_source_attestations
            if index in get_attesting_indices(state, a)
        ], key=lambda a: a.inclusion_delay)
        attestation = min(
            [a for a in matching_source_attestations if index in get_attesting_indices(state, a)],
            key=lambda a: a.inclusion_delay,
        )
        rewards[attestation.proposer_index] += get_proposer_reward(state, index)
        max_attester_reward = Gwei(get_base_reward(state, index) - get_proposer_reward(state, index))
        max_attester_reward = Gwei(
            get_base_reward(state, index) - get_proposer_reward(state, index)
        )
        rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay)

    # No penalties associated with inclusion delay
@@ -1577,15 +1659,23 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
    """
    penalties = [Gwei(0) for _ in range(len(state.validators))]
    if is_in_inactivity_leak(state):
        matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
        matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
        matching_target_attestations = get_matching_target_attestations(
            state, get_previous_epoch(state)
        )
        matching_target_attesting_indices = get_unslashed_attesting_indices(
            state, matching_target_attestations
        )
        for index in get_eligible_validator_indices(state):
            # If validator is performing optimally this cancels all rewards for a neutral balance
            base_reward = get_base_reward(state, index)
            penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * base_reward - get_proposer_reward(state, index))
            penalties[index] += Gwei(
                BASE_REWARDS_PER_EPOCH * base_reward - get_proposer_reward(state, index)
            )
            if index not in matching_target_attesting_indices:
                effective_balance = state.validators[index].effective_balance
                penalties[index] += Gwei(effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT)
                penalties[index] += Gwei(
                    effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT
                )

    # No rewards associated with inactivity penalties
    rewards = [Gwei(0) for _ in range(len(state.validators))]
@@ -1648,13 +1738,17 @@ def process_registry_updates(state: BeaconState) -> None:
            initiate_validator_exit(state, ValidatorIndex(index))

    # Queue validators eligible for activation and not yet dequeued for activation
    activation_queue = sorted([
        index for index, validator in enumerate(state.validators)
        if is_eligible_for_activation(state, validator)
    activation_queue = sorted(
        [
            index
            for index, validator in enumerate(state.validators)
            if is_eligible_for_activation(state, validator)
        ],
        # Order by the sequence of activation_eligibility_epoch setting and then index
    ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index))
        key=lambda index: (state.validators[index].activation_eligibility_epoch, index),
    )
    # Dequeued validators for activation up to churn limit
    for index in activation_queue[:get_validator_churn_limit(state)]:
    for index in activation_queue[: get_validator_churn_limit(state)]:
        validator = state.validators[index]
        validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
```
@@ -1665,11 +1759,18 @@ def process_registry_updates(state: BeaconState) -> None:
def process_slashings(state: BeaconState) -> None:
    epoch = get_current_epoch(state)
    total_balance = get_total_active_balance(state)
    adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
    adjusted_total_slashing_balance = min(
        sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance
    )
    for index, validator in enumerate(state.validators):
        if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
        if (
            validator.slashed
            and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch
        ):
            increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
            penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
            penalty_numerator = (
                validator.effective_balance // increment * adjusted_total_slashing_balance
            )
            penalty = penalty_numerator // total_balance * increment
            decrease_balance(state, ValidatorIndex(index), penalty)
```
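
Why `increment` is factored out: it keeps the penalty numerator within uint64. A quick demonstration with hypothetical magnitudes (Gwei throughout; the slashed and total balances are assumptions chosen for the example):

```python
UINT64_MAX = 2**64 - 1
increment = 1_000_000_000                            # EFFECTIVE_BALANCE_INCREMENT
effective_balance = 32_000_000_000
adjusted_total_slashing_balance = 3_000_000 * 10**9  # assume 3M ETH slashed
total_balance = 10_000_000 * 10**9                   # assume 10M ETH active

# The naive numerator overflows uint64...
assert effective_balance * adjusted_total_slashing_balance > UINT64_MAX
# ...while the factored form stays comfortably in range
penalty_numerator = effective_balance // increment * adjusted_total_slashing_balance
assert penalty_numerator <= UINT64_MAX
penalty = penalty_numerator // total_balance * increment
assert penalty == 9_000_000_000  # 9 ETH once integer division rounds down
```
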

@@ -1698,7 +1799,9 @@ def process_effective_balance_updates(state: BeaconState) -> None:
            balance + DOWNWARD_THRESHOLD < validator.effective_balance
            or validator.effective_balance + UPWARD_THRESHOLD < balance
        ):
            validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
            validator.effective_balance = min(
                balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE
            )
```

#### Slashings balances updates
@@ -1717,7 +1820,9 @@ def process_randao_mixes_reset(state: BeaconState) -> None:
    current_epoch = get_current_epoch(state)
    next_epoch = Epoch(current_epoch + 1)
    # Set randao mix
    state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
    state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(
        state, current_epoch
    )
```

#### Historical roots updates
@@ -1727,7 +1832,9 @@ def process_historical_roots_update(state: BeaconState) -> None:
    # Set historical root accumulator
    next_epoch = Epoch(get_current_epoch(state) + 1)
    if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
        historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
        historical_batch = HistoricalBatch(
            block_roots=state.block_roots, state_roots=state.state_roots
        )
        state.historical_roots.append(hash_tree_root(historical_batch))
```

@@ -1795,7 +1902,10 @@ def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
```python
def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
    state.eth1_data_votes.append(body.eth1_data)
    if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
    if (
        state.eth1_data_votes.count(body.eth1_data) * 2
        > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH
    ):
        state.eth1_data = body.eth1_data
```

@@ -1804,7 +1914,9 @@ def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
```python
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # Verify that outstanding deposits are processed up to the maximum number of deposits
    assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
    assert len(body.deposits) == min(
        MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index
    )

    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
        for operation in operations:
@@ -1835,7 +1947,9 @@ def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSla
    assert is_slashable_validator(proposer, get_current_epoch(state))
    # Verify signatures
    for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2):
        domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot))
        domain = get_domain(
            state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot)
        )
        signing_root = compute_signing_root(signed_header.message, domain)
        assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature)

@@ -1895,7 +2009,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
##### Deposits

```python
def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
def get_validator_from_deposit(
    pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
) -> Validator:
    effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)

    return Validator(
@@ -1911,20 +2027,21 @@ def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes3
```

```python
def add_validator_to_registry(state: BeaconState,
                              pubkey: BLSPubkey,
                              withdrawal_credentials: Bytes32,
                              amount: uint64) -> None:
def add_validator_to_registry(
    state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64
) -> None:
    state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
    state.balances.append(amount)
```

```python
def apply_deposit(state: BeaconState,
                  pubkey: BLSPubkey,
                  withdrawal_credentials: Bytes32,
                  amount: uint64,
                  signature: BLSSignature) -> None:
def apply_deposit(
    state: BeaconState,
    pubkey: BLSPubkey,
    withdrawal_credentials: Bytes32,
    amount: uint64,
    signature: BLSSignature,
) -> None:
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
@@ -1933,7 +2050,8 @@ def apply_deposit(state: BeaconState,
            withdrawal_credentials=withdrawal_credentials,
            amount=amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        # Fork-agnostic domain since deposits are valid across forks
        domain = compute_domain(DOMAIN_DEPOSIT)
        signing_root = compute_signing_root(deposit_message, domain)
        if bls.Verify(pubkey, signing_root, signature):
            add_validator_to_registry(state, pubkey, withdrawal_credentials, amount)
@@ -1949,7 +2067,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(deposit.data),
        branch=deposit.proof,
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the List length mix-in
        # Add 1 for the List length mix-in
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,
        index=state.eth1_deposit_index,
        root=state.eth1_data.deposit_root,
    )

@@ -194,7 +194,7 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
        blocks={anchor_root: copy(anchor_block)},
        block_states={anchor_root: copy(anchor_state)},
        checkpoint_states={justified_checkpoint: copy(anchor_state)},
        unrealized_justifications={anchor_root: justified_checkpoint}
        unrealized_justifications={anchor_root: justified_checkpoint},
    )
```

@@ -270,15 +270,22 @@ def get_proposer_score(store: Store) -> Gwei:
def get_weight(store: Store, root: Root) -> Gwei:
    state = store.checkpoint_states[store.justified_checkpoint]
    unslashed_and_active_indices = [
        i for i in get_active_validator_indices(state, get_current_epoch(state))
        i
        for i in get_active_validator_indices(state, get_current_epoch(state))
        if not state.validators[i].slashed
    ]
    attestation_score = Gwei(sum(
        state.validators[i].effective_balance for i in unslashed_and_active_indices
        if (i in store.latest_messages
            and i not in store.equivocating_indices
            and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
    ))
    attestation_score = Gwei(
        sum(
            state.validators[i].effective_balance
            for i in unslashed_and_active_indices
            if (
                i in store.latest_messages
                and i not in store.equivocating_indices
                and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot)
                == root
            )
        )
    )
    if store.proposer_boost_root == Root():
        # Return only attestation score if ``proposer_boost_root`` is not set
        return attestation_score
@@ -320,8 +327,7 @@ by the recursive logic in this function) MUST set `block_root` to
def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
    block = store.blocks[block_root]
    children = [
        root for root in store.blocks.keys()
        if store.blocks[root].parent_root == block_root
        root for root in store.blocks.keys() if store.blocks[root].parent_root == block_root
    ]

    # If any children branches contain expected finalized/justified checkpoints,
@@ -387,10 +393,7 @@ def get_head(store: Store) -> Root:
    # Execute the LMD-GHOST fork choice
    head = store.justified_checkpoint.root
    while True:
        children = [
            root for root in blocks.keys()
            if blocks[root].parent_root == head
        ]
        children = [root for root in blocks.keys() if blocks[root].parent_root == head]
        if len(children) == 0:
            return head
        # Sort by latest attesting balance with ties broken lexicographically
@@ -401,7 +404,9 @@ def get_head(store: Store) -> Root:
#### `update_checkpoints`

```python
def update_checkpoints(store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint) -> None:
def update_checkpoints(
    store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint
) -> None:
    """
    Update checkpoints in store if necessary
    """
@@ -417,8 +422,11 @@ def update_checkpoints(store: Store, justified_checkpoint: Checkpoint, finalized
#### `update_unrealized_checkpoints`

```python
def update_unrealized_checkpoints(store: Store, unrealized_justified_checkpoint: Checkpoint,
                                  unrealized_finalized_checkpoint: Checkpoint) -> None:
def update_unrealized_checkpoints(
    store: Store,
    unrealized_justified_checkpoint: Checkpoint,
    unrealized_finalized_checkpoint: Checkpoint,
) -> None:
    """
    Update unrealized checkpoints in store if necessary
    """
@@ -453,7 +461,9 @@ def is_shuffling_stable(slot: Slot) -> bool:

```python
def is_ffg_competitive(store: Store, head_root: Root, parent_root: Root) -> bool:
    return (store.unrealized_justifications[head_root] == store.unrealized_justifications[parent_root])
    return (
        store.unrealized_justifications[head_root] == store.unrealized_justifications[parent_root]
    )
```

##### `is_finalization_ok`
@@ -529,8 +539,18 @@ def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root:
    # Check that the missing votes are assigned to the parent and not being hoarded.
    parent_strong = is_parent_strong(store, parent_root)

    if all([head_late, shuffling_stable, ffg_competitive, finalization_ok,
            proposing_on_time, single_slot_reorg, head_weak, parent_strong]):
    if all(
        [
            head_late,
            shuffling_stable,
            ffg_competitive,
            finalization_ok,
            proposing_on_time,
            single_slot_reorg,
            head_weak,
            parent_strong,
        ]
    ):
        # We can re-org the current head by building upon its parent block.
        return parent_root
    else:
@@ -552,7 +572,9 @@ def compute_pulled_up_tip(store: Store, block_root: Root) -> None:
    process_justification_and_finalization(state)

    store.unrealized_justifications[block_root] = state.current_justified_checkpoint
    update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
    update_unrealized_checkpoints(
        store, state.current_justified_checkpoint, state.finalized_checkpoint
    )

    # If the block is from a prior epoch, apply the realized values
    block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot)
@@ -580,7 +602,9 @@ def on_tick_per_slot(store: Store, time: uint64) -> None:

    # If a new epoch, pull-up justification and finalization from previous epoch
    if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0:
        update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint)
        update_checkpoints(
            store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint
        )
```

#### `on_attestation` helpers
@@ -621,7 +645,9 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc
    assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot

    # LMD vote must be consistent with FFG vote target
    assert target.root == get_checkpoint_block(store, attestation.data.beacon_block_root, target.epoch)
    assert target.root == get_checkpoint_block(
        store, attestation.data.beacon_block_root, target.epoch
    )

    # Attestations can only affect the fork choice of subsequent slots.
    # Delay consideration in the fork choice until their slot is in the past.
@@ -643,10 +669,14 @@ def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None:
##### `update_latest_messages`

```python
def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
def update_latest_messages(
    store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation
) -> None:
    target = attestation.data.target
    beacon_block_root = attestation.data.beacon_block_root
    non_equivocating_attesting_indices = [i for i in attesting_indices if i not in store.equivocating_indices]
    non_equivocating_attesting_indices = [
        i for i in attesting_indices if i not in store.equivocating_indices
    ]
    for i in non_equivocating_attesting_indices:
        if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
            store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root)
@@ -720,7 +750,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
#### `on_attestation`

```python
def on_attestation(store: Store, attestation: Attestation, is_from_block: bool=False) -> None:
def on_attestation(store: Store, attestation: Attestation, is_from_block: bool = False) -> None:
    """
    Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.


@@ -1315,7 +1315,9 @@ should:
def compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID:
    node_id_prefix = node_id >> (NODE_ID_BITS - ATTESTATION_SUBNET_PREFIX_BITS)
    node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION
    permutation_seed = hash(uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)))
    permutation_seed = hash(
        uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION))
    )
    permutated_prefix = compute_shuffled_index(
        node_id_prefix,
        1 << ATTESTATION_SUBNET_PREFIX_BITS,
@@ -1966,8 +1968,7 @@ epoch range, we use the worst case event of a very large validator size

```python
MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)
```
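
Plugging in hypothetical mainnet-style values shows the magnitude this expression produces (the normative constants live in the configuration files):

```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256  # epochs (illustrative)
CHURN_LIMIT_QUOTIENT = 65536               # illustrative
MAX_SAFETY_DECAY = 100                     # percent (illustrative)

min_epochs = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)
assert min_epochs == 33024  # roughly five months of epochs at 6.4 minutes each
```
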


@@ -269,10 +269,9 @@ helper via `get_committee_assignment(state, epoch, validator_index)` where
`epoch <= next_epoch`.

```python
def get_committee_assignment(state: BeaconState,
                             epoch: Epoch,
                             validator_index: ValidatorIndex
                             ) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]:
def get_committee_assignment(
    state: BeaconState, epoch: Epoch, validator_index: ValidatorIndex
) -> Optional[Tuple[Sequence[ValidatorIndex], CommitteeIndex, Slot]]:
    """
    Return the committee assignment in the ``epoch`` for ``validator_index``.
    ``assignment`` returned is a tuple of the following form:
@@ -456,7 +455,9 @@ def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64:

```python
def voting_period_start_time(state: BeaconState) -> uint64:
    eth1_voting_period_start_slot = Slot(state.slot - state.slot % (EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH))
    eth1_voting_period_start_slot = Slot(
        state.slot - state.slot % (EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH)
    )
    return compute_time_at_slot(state, eth1_voting_period_start_slot)
```

@@ -473,7 +474,8 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
    period_start = voting_period_start_time(state)
    # `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height
    votes_to_consider = [
        get_eth1_data(block) for block in eth1_chain
        get_eth1_data(block)
        for block in eth1_chain
        if (
            is_candidate_block(block, period_start)
            # Ensure cannot move back to earlier deposit contract states
@@ -487,12 +489,18 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
    # Default vote on latest eth1 block data in the period range unless eth1 chain is not live
    # Non-substantive casting for linter
    state_eth1_data: Eth1Data = state.eth1_data
    default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data
    default_vote = (
        votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data
    )

    return max(
        valid_votes,
        key=lambda v: (valid_votes.count(v), -valid_votes.index(v)),  # Tiebreak by smallest distance
        default=default_vote
        # Tiebreak by smallest distance
        key=lambda v: (
            valid_votes.count(v),
            -valid_votes.index(v),
        ),
        default=default_vote,
    )
```
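
The `max(...)` tiebreak in isolation: the most frequent vote wins, and among equally frequent votes the earliest-seen one (smallest index) wins. A tiny standalone demonstration:

```python
valid_votes = ["a", "b", "b", "c", "c"]
winner = max(
    valid_votes,
    key=lambda v: (valid_votes.count(v), -valid_votes.index(v)),
    default=None,
)
assert winner == "b"  # ties "c" on count but appeared first
```
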

@@ -666,7 +674,9 @@ Set `attestation.signature = attestation_signature` where
`attestation_signature` is obtained from:

```python
def get_attestation_signature(state: BeaconState, attestation_data: AttestationData, privkey: int) -> BLSSignature:
def get_attestation_signature(
    state: BeaconState, attestation_data: AttestationData, privkey: int
) -> BLSSignature:
    domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
    signing_root = compute_signing_root(attestation_data, domain)
    return bls.Sign(privkey, signing_root)
@@ -685,9 +695,9 @@ The `subnet_id` for the `attestation` is calculated with:
`subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`.

```python
def compute_subnet_for_attestation(committees_per_slot: uint64,
                                   slot: Slot,
                                   committee_index: CommitteeIndex) -> SubnetID:
def compute_subnet_for_attestation(
    committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex
) -> SubnetID:
    """
    Compute the correct subnet for an attestation for Phase 0.
    Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
@@ -716,7 +726,9 @@ def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSigna
```

```python
def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
def is_aggregator(
    state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature
) -> bool:
    committee = get_beacon_committee(state, slot, index)
    modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
    return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
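
A sketch of this aggregator lottery: hash the slot signature, reduce the first 8 bytes mod `modulo`, and roughly `TARGET_AGGREGATORS_PER_COMMITTEE` members win. Here `sha256` stands in for the spec's hash and random bytes stand in for a real BLS signature; both are assumptions for illustration.

```python
import hashlib
import os

TARGET_AGGREGATORS_PER_COMMITTEE = 16  # illustrative constant

def toy_is_aggregator(committee_size: int, slot_signature: bytes) -> bool:
    modulo = max(1, committee_size // TARGET_AGGREGATORS_PER_COMMITTEE)
    digest = hashlib.sha256(slot_signature).digest()
    # bytes_to_uint64 in the spec is little-endian over the first 8 bytes
    return int.from_bytes(digest[0:8], "little") % modulo == 0

# With a 128-member committee, modulo == 8, so each member has a 1-in-8
# chance: about 16 aggregators per committee in expectation.
wins = sum(toy_is_aggregator(128, os.urandom(96)) for _ in range(10_000))
print(f"{wins / 10_000:.3f} (expected about 0.125)")
```
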
|
||||
@@ -775,10 +787,9 @@ First,
|
||||
is constructed.
|
||||
|
||||
```python
|
||||
def get_aggregate_and_proof(state: BeaconState,
|
||||
aggregator_index: ValidatorIndex,
|
||||
aggregate: Attestation,
|
||||
privkey: int) -> AggregateAndProof:
|
||||
def get_aggregate_and_proof(
|
||||
state: BeaconState, aggregator_index: ValidatorIndex, aggregate: Attestation, privkey: int
|
||||
) -> AggregateAndProof:
|
||||
return AggregateAndProof(
|
||||
aggregator_index=aggregator_index,
|
||||
aggregate=aggregate,
|
||||
@@ -791,11 +802,13 @@ Then
|
||||
is constructed and broadcast. Where `signature` is obtained from:
|
||||
|
||||
```python
|
||||
def get_aggregate_and_proof_signature(state: BeaconState,
|
||||
aggregate_and_proof: AggregateAndProof,
|
||||
privkey: int) -> BLSSignature:
|
||||
def get_aggregate_and_proof_signature(
|
||||
state: BeaconState, aggregate_and_proof: AggregateAndProof, privkey: int
|
||||
) -> BLSSignature:
|
||||
aggregate = aggregate_and_proof.aggregate
|
||||
domain = get_domain(state, DOMAIN_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot))
|
||||
domain = get_domain(
|
||||
state, DOMAIN_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot)
|
||||
)
|
||||
signing_root = compute_signing_root(aggregate_and_proof, domain)
|
||||
return bls.Sign(privkey, signing_root)
|
||||
```
|
||||
|
||||
@@ -112,14 +112,10 @@ def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
|
||||
epochs_for_validator_set_churn = (
|
||||
N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
|
||||
)
|
||||
epochs_for_balance_top_ups = (
|
||||
N * (200 + 3 * D) // (600 * Delta)
|
||||
)
|
||||
epochs_for_balance_top_ups = N * (200 + 3 * D) // (600 * Delta)
|
||||
ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
|
||||
else:
|
||||
ws_period += (
|
||||
3 * N * D * t // (200 * Delta * (T - t))
|
||||
)
|
||||
ws_period += 3 * N * D * t // (200 * Delta * (T - t))
|
||||
|
||||
return ws_period
|
||||
```
|
||||
@@ -182,7 +178,9 @@ source). The check can be implemented in the following way:
|
||||
#### `is_within_weak_subjectivity_period`
|
||||
|
||||
```python
|
||||
def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
|
||||
def is_within_weak_subjectivity_period(
|
||||
store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint
|
||||
) -> bool:
|
||||
# Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
|
||||
assert ws_state.latest_block_header.state_root == ws_checkpoint.root
|
||||
assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch
|
||||
|
||||
@@ -125,8 +125,9 @@ def item_length(typ: SSZType) -> int:
|
||||
```
|
||||
|
||||
```python
|
||||
def get_elem_type(typ: Union[BaseBytes, BaseList, Container],
|
||||
index_or_variable_name: Union[int, SSZVariableName]) -> SSZType:
|
||||
def get_elem_type(
|
||||
typ: Union[BaseBytes, BaseList, Container], index_or_variable_name: Union[int, SSZVariableName]
|
||||
) -> SSZType:
|
||||
"""
|
||||
Return the type of the element of an object of the given type with the given index
|
||||
or member variable name (eg. `7` for `x[7]`, `"foo"` for `x.foo`)
|
||||
@@ -157,7 +158,9 @@ def chunk_count(typ: SSZType) -> int:
```

```python
def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariableName]) -> Tuple[int, int, int]:
def get_item_position(
    typ: SSZType, index_or_variable_name: Union[int, SSZVariableName]
) -> Tuple[int, int, int]:
    """
    Return three variables:
    (i) the index of the chunk in which the given element of the item is represented;
@@ -171,7 +174,11 @@ def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariab
        return start // 32, start % 32, start % 32 + item_length(typ.elem_type)
    elif issubclass(typ, Container):
        variable_name = index_or_variable_name
        return typ.get_field_names().index(variable_name), 0, item_length(get_elem_type(typ, variable_name))
        return (
            typ.get_field_names().index(variable_name),
            0,
            item_length(get_elem_type(typ, variable_name)),
        )
    else:
        raise Exception("Only lists/vectors/containers supported")
```
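
The multi-line return is equivalent to the old one-liner. A worked example of the tuple it produces, assuming SSZ's 32-byte chunks and 8-byte `uint64` items: element 5 of a `List[uint64, 64]` starts at byte 40, i.e. chunk `40 // 32 == 1`, intra-chunk offset `40 % 32 == 8`, ending at byte `8 + 8 == 16`:

```python
# Illustration only; follows directly from the return expression above.
assert get_item_position(List[uint64, 64], 5) == (1, 8, 16)
```
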
@@ -184,15 +191,20 @@ def get_generalized_index(typ: SSZType, *path: PyUnion[int, SSZVariableName]) ->
    """
    root = GeneralizedIndex(1)
    for p in path:
        assert not issubclass(typ, BasicValue)  # If we descend to a basic type, the path cannot continue further
        if p == '__len__':
        # If we descend to a basic type, the path cannot continue further
        assert not issubclass(typ, BasicValue)
        if p == "__len__":
            assert issubclass(typ, (List, ByteList))
            typ = uint64
            root = GeneralizedIndex(root * 2 + 1)
        else:
            pos, _, _ = get_item_position(typ, p)
            base_index = (GeneralizedIndex(2) if issubclass(typ, (List, ByteList)) else GeneralizedIndex(1))
            root = GeneralizedIndex(root * base_index * get_power_of_two_ceil(chunk_count(typ)) + pos)
            base_index = (
                GeneralizedIndex(2) if issubclass(typ, (List, ByteList)) else GeneralizedIndex(1)
            )
            root = GeneralizedIndex(
                root * base_index * get_power_of_two_ceil(chunk_count(typ)) + pos
            )
        typ = get_elem_type(typ, p)
    return root
```
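
The hoisted comment and wrapped expressions leave the index arithmetic intact. Making that arithmetic concrete (reusing the hypothetical two-field `Foo` from earlier): a container step computes `root * get_power_of_two_ceil(chunk_count(typ)) + pos`, and the `__len__` mix-in of a list hangs off `root * 2 + 1`:

```python
# Illustration only: with chunk_count(Foo) == 2, the fields land at indices 2 and 3.
assert get_generalized_index(Foo, "x") == GeneralizedIndex(2)
assert get_generalized_index(Foo, "y") == GeneralizedIndex(3)
assert get_generalized_index(List[uint64, 4], "__len__") == GeneralizedIndex(3)
```
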
@@ -340,22 +352,24 @@ def calculate_merkle_root(leaf: Bytes32, proof: Sequence[Bytes32], index: Genera
```

```python
def verify_merkle_proof(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root) -> bool:
def verify_merkle_proof(
    leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root
) -> bool:
    return calculate_merkle_root(leaf, proof, index) == root
```

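A sketch of the single-item verifier in use (all values hypothetical; `branch` and `state_root` would come from an actual proof):

```python
# Hypothetical usage: check one field of a state against a known root.
valid = verify_merkle_proof(
    leaf=hash_tree_root(ws_state.latest_block_header),
    proof=branch,
    index=get_generalized_index(BeaconState, "latest_block_header"),
    root=state_root,
)
```
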
Now for multi-item proofs:

```python
def calculate_multi_merkle_root(leaves: Sequence[Bytes32],
                                proof: Sequence[Bytes32],
                                indices: Sequence[GeneralizedIndex]) -> Root:
def calculate_multi_merkle_root(
    leaves: Sequence[Bytes32], proof: Sequence[Bytes32], indices: Sequence[GeneralizedIndex]
) -> Root:
    assert len(leaves) == len(indices)
    helper_indices = get_helper_indices(indices)
    assert len(proof) == len(helper_indices)
    objects = {
        **{index: node for index, node in zip(indices, leaves)},
        **{index: node for index, node in zip(helper_indices, proof)}
        **{index: node for index, node in zip(helper_indices, proof)},
    }
    keys = sorted(objects.keys(), reverse=True)
    pos = 0
@@ -363,8 +377,7 @@ def calculate_multi_merkle_root(leaves: Sequence[Bytes32],
        k = keys[pos]
        if k in objects and k ^ 1 in objects and k // 2 not in objects:
            objects[GeneralizedIndex(k // 2)] = hash(
                objects[GeneralizedIndex((k | 1) ^ 1)] +
                objects[GeneralizedIndex(k | 1)]
                objects[GeneralizedIndex((k | 1) ^ 1)] + objects[GeneralizedIndex(k | 1)]
            )
            keys.append(GeneralizedIndex(k // 2))
        pos += 1
@@ -372,10 +385,12 @@ def calculate_multi_merkle_root(leaves: Sequence[Bytes32],
```

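One line worth a gloss before the verifier below: for any node index `k`, `k | 1` is the right-hand member of a sibling pair and `(k | 1) ^ 1` the left-hand one, so the reformatted `hash(...)` call always concatenates left + right regardless of which sibling `k` is. A plain-integer check:

```python
# Illustration only: sibling arithmetic on generalized indices.
for k in (8, 9, 14, 15):
    left, right = (k | 1) ^ 1, k | 1
    assert {left, right} == {k, k ^ 1} and right == left + 1
```
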
```python
def verify_merkle_multiproof(leaves: Sequence[Bytes32],
                             proof: Sequence[Bytes32],
                             indices: Sequence[GeneralizedIndex],
                             root: Root) -> bool:
def verify_merkle_multiproof(
    leaves: Sequence[Bytes32],
    proof: Sequence[Bytes32],
    indices: Sequence[GeneralizedIndex],
    root: Root,
) -> bool:
    return calculate_multi_merkle_root(leaves, proof, indices) == root
```

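A degenerate but instructive case, assuming `get_helper_indices` (defined earlier in this file) returns an empty list when the supplied leaves already cover the tree: proving both children of the root needs no helper nodes, and the recomputed root is simply `hash(left + right)`:

```python
# Illustration only; the two leaves are arbitrary 32-byte constants.
left, right = Bytes32(b"\x01" * 32), Bytes32(b"\x02" * 32)
assert verify_merkle_multiproof(
    leaves=[left, right],
    proof=[],
    indices=[GeneralizedIndex(2), GeneralizedIndex(3)],
    root=hash(left + right),
)
```
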
@@ -178,10 +178,12 @@ variable_parts = [serialize(element) if is_variable_size(element) else b"" for e
# Compute and check lengths
fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part in fixed_parts]
variable_lengths = [len(part) for part in variable_parts]
assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE)
assert sum(fixed_lengths + variable_lengths) < 2 ** (BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE)

# Interleave offsets of variable-size parts with fixed-size parts
variable_offsets = [serialize(uint32(sum(fixed_lengths + variable_lengths[:i]))) for i in range(len(value))]
variable_offsets = [
    serialize(uint32(sum(fixed_lengths + variable_lengths[:i]))) for i in range(len(value))
]
fixed_parts = [part if part != None else variable_offsets[i] for i, part in enumerate(fixed_parts)]

# Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts
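
The offset formula above is easy to sanity-check with plain integers. For a hypothetical container holding a `uint16`, one variable-size field, and another `uint16`, the fixed area is 2 + 4 + 2 = 8 bytes (the variable field contributes a 4-byte offset placeholder), so the first variable part begins at byte 8:

```python
# Plain-integer illustration of the offset rule; no SSZ machinery needed.
BYTES_PER_LENGTH_OFFSET = 4
fixed_lengths = [2, BYTES_PER_LENGTH_OFFSET, 2]  # uint16, offset placeholder, uint16
variable_lengths = [5]                           # one variable part, 5 bytes long
assert sum(fixed_lengths + variable_lengths[:0]) == 8  # offset of variable part 0
```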