Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00

Comparing fix-backwa...gRPC-fallb (12 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9f828bdd88 | |
| | 0fcb922702 | |
| | 3646a77bfb | |
| | 1541558261 | |
| | 17413b52ed | |
| | a651e7f0ac | |
| | 3e1cb45e92 | |
| | fc2dcb0e88 | |
| | 888db581dd | |
| | f1d2ee72e2 | |
| | 31f18b9f60 | |
| | 6462c997e9 | |
CHANGELOG.md (19 lines changed)
@@ -4,6 +4,25 @@ All notable changes to this project will be documented in this file.
 
 The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
 
+## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07
+
+Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).
+
+### Added
+
+- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16169)
+
+### Changed
+
+- The `/eth/v2/beacon/pool/attestations` and `/eth/v1/beacon/pool/sync_committees` endpoints now return a 503 error while the node is still syncing; the REST API also broadcasts immediately now, mirroring the gRPC behavior. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16152)
+- `validateDataColumn`: Remove error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16157)
+- Pending aggregates: When multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16153)
+
+### Fixed
+
+- Fix the missing fork version object mapping for Fulu in light client p2p. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16151)
+- Do not process slots and copy states for next epoch proposers after Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16168)
+
 ## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18
 
 Release highlights:
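The pending-aggregates change is the release's main improvement, and the idea is simple: two aggregated attestations that are identical except for their `aggregator_index` carry the same votes, so processing both is wasted work. A minimal sketch of that deduplication (hypothetical types and helper, not the actual Prysm implementation):

```go
package pending

import "crypto/sha256"

// aggregate is a simplified stand-in for an aggregated attestation: the
// data root and aggregation bits identify the vote; aggregatorIndex only
// identifies which aggregator broadcast it.
type aggregate struct {
	dataRoot        [32]byte
	aggregationBits []byte
	aggregatorIndex uint64
}

// dedupKey hashes everything except the aggregator index, so aggregates
// that differ only by aggregator collapse to a single queue entry.
func dedupKey(a aggregate) [32]byte {
	h := sha256.New()
	h.Write(a.dataRoot[:])
	h.Write(a.aggregationBits)
	var k [32]byte
	copy(k[:], h.Sum(nil))
	return k
}

// enqueue keeps at most one pending aggregate per key.
func enqueue(queue map[[32]byte]aggregate, a aggregate) {
	k := dedupKey(a)
	if _, seen := queue[k]; !seen {
		queue[k] = a
	}
}
```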
WORKSPACE (10 lines changed)
@@ -273,16 +273,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.6.0"
+consensus_spec_version = "v1.7.0-alpha.0"
 
 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
 
 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
-        "minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
-        "mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
+        "general": "sha256-b+rJOuVqq+Dy53quPcNYcQwPFoMU7Wp7tdUVe7n0g8w=",
+        "minimal": "sha256-qxRIxtjPxVsVCY90WsBJKhk0027XDSmhjnRvRN14V1c=",
+        "mainnet": "sha256-NsuOQG3LzeiEE1TrWuvQ6vu6BboHv7h7f/RTS0pWkCs=",
     },
     version = consensus_spec_version,
 )
@@ -298,7 +298,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
+    integrity = "sha256-hwNdUBgdBrkk6pWIpNYbzbwswUuOu6AMD2exN8uv+QQ=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -212,7 +212,8 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
 		if err != nil {
 			return nil, errors.Wrap(err, "could not get next withdrawal validator index")
 		}
-		nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
+		bound := min(uint64(st.NumValidators()), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
+		nextValidatorIndex += primitives.ValidatorIndex(bound)
 		nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
 	} else {
 		nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
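The `min` bound matters when the validator set is smaller than the sweep limit: advancing the cursor by the raw constant and then wrapping modulo the set size lands on a different index than advancing by the number of validators actually swept, which is how the updated spec's `get_validators_sweep_withdrawals` counts. A small, self-contained illustration of the arithmetic (hypothetical values only, not Prysm code):

```go
package main

import "fmt"

func main() {
	const sweep = 12           // stand-in for MaxValidatorsPerWithdrawalsSweep
	numValidators := uint64(5) // a set smaller than the sweep limit
	next := uint64(3)          // current next_withdrawal_validator_index

	// Old behavior: advance by the raw constant, then wrap.
	oldNext := (next + sweep) % numValidators // (3+12)%5 = 0

	// New behavior: advance by the number of validators actually swept.
	bound := min(uint64(sweep), numValidators)
	newNext := (next + bound) % numValidators // (3+5)%5 = 3

	fmt.Println(oldNext, newNext) // 0 3 — the two rules disagree for small sets
}
```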
@@ -56,9 +56,7 @@ func (r StateRoots) MarshalSSZTo(dst []byte) ([]byte, error) {
 func (r StateRoots) MarshalSSZ() ([]byte, error) {
 	marshalled := make([]byte, fieldparams.StateRootsLength*32)
 	for i, r32 := range r {
-		for j, rr := range r32 {
-			marshalled[i*32+j] = rr
-		}
+		copy(marshalled[i*32:(i+1)*32], r32[:])
 	}
 	return marshalled, nil
 }
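The rewrite replaces a hand-rolled inner loop with one `copy` per 32-byte root, which the compiler lowers to an optimized memmove. A hypothetical micro-benchmark sketching the comparison (illustrative only; assumes the mainnet `StateRootsLength` of 8192):

```go
package roots_test

import "testing"

var roots = make([][32]byte, 8192) // mainnet StateRootsLength

func BenchmarkByteLoop(b *testing.B) {
	dst := make([]byte, len(roots)*32)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		for i, r32 := range roots {
			for j, rr := range r32 {
				dst[i*32+j] = rr
			}
		}
	}
}

func BenchmarkCopy(b *testing.B) {
	dst := make([]byte, len(roots)*32)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		for i, r32 := range roots {
			copy(dst[i*32:(i+1)*32], r32[:])
		}
	}
}
```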
@@ -1,3 +0,0 @@
-### Fixed
-
-- Fix the missing fork version object mapping for Fulu in light client p2p.
@@ -1,3 +0,0 @@
-### Added
-
-- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.
@@ -1,3 +0,0 @@
-### Changed
-
-- the /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees now returns a 503 error if the node is still syncing, the rest api is also working in a similar process to gRPC broadcasting immediately now.
@@ -1,3 +0,0 @@
-### Changed
-
-- Pending aggregates: When multiple aggregated attestations only differing by the aggregator index are in the pending queue, only process one of them.
@@ -1,2 +0,0 @@
-### Changed
-- `validateDataColumn`: Remove error logs.
@@ -1,3 +0,0 @@
-### Fixed
-
-- Do not process slots and copy states for next epoch proposers after Fulu
changelog/potuz_update_spectests.md (new file, 2 lines)
@@ -0,0 +1,2 @@
+### Added
+- Update spectests to v1.7.0-alpha.0
changelog/pvl-changelog-v7.1.2.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Ignored
+
+- Updated changelog for v7.1.2
changelog/satushh-opt.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Changed
+
+- Performance improvement in state (MarshalSSZTo): use copy() instead of an unnecessary byte-by-byte loop.
@@ -55,7 +55,8 @@ var placeholderFields = []string{
 	"MAX_REQUEST_BLOB_SIDECARS_FULU",
 	"MAX_REQUEST_INCLUSION_LIST",
 	"MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests
-	"NUMBER_OF_COLUMNS", // Configured as a constant in config/fieldparams/mainnet.go
+	"MIN_BUILDER_WITHDRAWABILITY_DELAY",
+	"NUMBER_OF_COLUMNS", // Configured as a constant in config/fieldparams/mainnet.go
 	"PAYLOAD_ATTESTATION_DUE_BPS",
 	"PROPOSER_INCLUSION_LIST_CUTOFF",
 	"PROPOSER_INCLUSION_LIST_CUTOFF_BPS",
@@ -1,4 +1,4 @@
-version: v1.6.0
+version: v1.7.0-alpha.0
 style: full
 
 specrefs:
@@ -57,6 +57,12 @@ exceptions:
   - PAYLOAD_STATUS_EMPTY#gloas
   - PAYLOAD_STATUS_FULL#gloas
   - PAYLOAD_STATUS_PENDING#gloas
+  - ATTESTATION_TIMELINESS_INDEX#gloas
+  - BUILDER_INDEX_FLAG#gloas
+  - BUILDER_INDEX_SELF_BUILD#gloas
+  - DOMAIN_PROPOSER_PREFERENCES#gloas
+  - NUM_BLOCK_TIMELINESS_DEADLINES#gloas
+  - PTC_TIMELINESS_INDEX#gloas
 
 configs:
   # Not implemented (placeholders)
@@ -76,6 +82,7 @@ exceptions:
   - MAX_REQUEST_PAYLOADS#gloas
   - PAYLOAD_ATTESTATION_DUE_BPS#gloas
   - SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
+  - MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
 
 ssz_objects:
   # Not implemented
@@ -103,6 +110,9 @@ exceptions:
   - PayloadAttestationMessage#gloas
   - SignedExecutionPayloadEnvelope#gloas
   - SignedExecutionPayloadBid#gloas
+  - Builder#gloas
+  - ProposerPreferences#gloas
+  - SignedProposerPreferences#gloas
 
 dataclasses:
   # Not implemented
@@ -331,10 +341,8 @@ exceptions:
   - get_ptc#gloas
   - get_ptc_assignment#gloas
   - get_weight#gloas
-  - has_builder_withdrawal_credential#gloas
   - has_compounding_withdrawal_credential#gloas
   - is_attestation_same_slot#gloas
   - is_builder_payment_withdrawable#gloas
-  - is_builder_withdrawal_credential#gloas
   - is_merge_transition_complete#gloas
   - is_parent_block_full#gloas
@@ -358,7 +366,6 @@ exceptions:
   - process_proposer_slashing#gloas
   - process_slot#gloas
   - process_withdrawals#gloas
   - remove_flag#gloas
   - should_extend_payload#gloas
   - update_latest_messages#gloas
   - upgrade_to_gloas#gloas
@@ -368,3 +375,55 @@ exceptions:
   - verify_data_column_sidecar_inclusion_proof#gloas
   - verify_execution_payload_envelope_signature#gloas
   - verify_execution_payload_bid_signature#gloas
+  - add_builder_to_registry#gloas
+  - apply_deposit_for_builder#gloas
+  - apply_withdrawals#capella
+  - apply_withdrawals#gloas
+  - can_builder_cover_bid#gloas
+  - compute_proposer_score#phase0
+  - convert_builder_index_to_validator_index#gloas
+  - convert_validator_index_to_builder_index#gloas
+  - get_attestation_score#gloas
+  - get_attestation_score#phase0
+  - get_balance_after_withdrawals#capella
+  - get_builder_from_deposit#gloas
+  - get_builder_withdrawals#gloas
+  - get_builders_sweep_withdrawals#gloas
+  - get_index_for_new_builder#gloas
+  - get_pending_balance_to_withdraw_for_builder#gloas
+  - get_pending_partial_withdrawals#electra
+  - get_proposer_preferences_signature#gloas
+  - get_upcoming_proposal_slots#gloas
+  - get_validators_sweep_withdrawals#capella
+  - get_validators_sweep_withdrawals#electra
+  - initiate_builder_exit#gloas
+  - is_active_builder#gloas
+  - is_builder_index#gloas
+  - is_eligible_for_partial_withdrawals#electra
+  - is_head_late#gloas
+  - is_head_weak#gloas
+  - is_parent_strong#gloas
+  - is_proposer_equivocation#phase0
+  - is_valid_proposal_slot#gloas
+  - process_deposit_request#gloas
+  - process_voluntary_exit#gloas
+  - record_block_timeliness#gloas
+  - record_block_timeliness#phase0
+  - should_apply_proposer_boost#gloas
+  - update_builder_pending_withdrawals#gloas
+  - update_next_withdrawal_builder_index#gloas
+  - update_next_withdrawal_index#capella
+  - update_next_withdrawal_validator_index#capella
+  - update_payload_expected_withdrawals#gloas
+  - update_pending_partial_withdrawals#electra
+  - update_proposer_boost_root#gloas
+  - update_proposer_boost_root#phase0
+
+presets:
+  - CELLS_PER_EXT_BLOB#fulu
+  - BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
+  - BUILDER_REGISTRY_LIMIT#gloas
+  - MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
+  - MAX_PAYLOAD_ATTESTATIONS#gloas
+  - PTC_SIZE#gloas
+  - UPDATE_TIMEOUT#altair
@@ -304,16 +304,6 @@
     GENESIS_SLOT: Slot = 0
     </spec>
 
-- name: INTERVALS_PER_SLOT
-  sources:
-    - file: config/params/config.go
-      search: IntervalsPerSlot\s+.*yaml:"INTERVALS_PER_SLOT"
-      regex: true
-  spec: |
-    <spec constant_var="INTERVALS_PER_SLOT" fork="phase0" hash="3352e419">
-    INTERVALS_PER_SLOT: uint64 = 3
-    </spec>
-
 - name: JUSTIFICATION_BITS_LENGTH
   sources:
     - file: config/params/config.go
@@ -698,7 +698,7 @@
 - name: compute_matrix
   sources: []
   spec: |
-    <spec fn="compute_matrix" fork="fulu" hash="b39370ca">
+    <spec fn="compute_matrix" fork="fulu" hash="0b88eac1">
     def compute_matrix(blobs: Sequence[Blob]) -> Sequence[MatrixEntry]:
         """
         Return the full, flattened sequence of matrix entries.
@@ -714,8 +714,8 @@
                 MatrixEntry(
                     cell=cell,
                     kzg_proof=proof,
-                    row_index=blob_index,
                     column_index=cell_index,
+                    row_index=blob_index,
                 )
             )
         return matrix
@@ -739,7 +739,7 @@
 - file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_electra.go
   search: func computeOnChainAggregate(
   spec: |
-    <spec fn="compute_on_chain_aggregate" fork="electra" hash="128055d6">
+    <spec fn="compute_on_chain_aggregate" fork="electra" hash="f020af4c">
     def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation:
         aggregates = sorted(
             network_aggregates, key=lambda a: get_committee_indices(a.committee_bits)[0]
@@ -760,8 +760,8 @@
         return Attestation(
             aggregation_bits=aggregation_bits,
             data=data,
-            committee_bits=committee_bits,
             signature=signature,
+            committee_bits=committee_bits,
         )
     </spec>
@@ -2366,40 +2366,18 @@
 - file: beacon-chain/state/state-native/getters_withdrawal.go
   search: func (b *BeaconState) ExpectedWithdrawals(
   spec: |
-    <spec fn="get_expected_withdrawals" fork="capella" hash="09191977">
-    def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
-        epoch = get_current_epoch(state)
+    <spec fn="get_expected_withdrawals" fork="capella" hash="d6a98c14">
+    def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
         withdrawal_index = state.next_withdrawal_index
-        validator_index = state.next_withdrawal_validator_index
         withdrawals: List[Withdrawal] = []
-        bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
-        for _ in range(bound):
-            validator = state.validators[validator_index]
-            balance = state.balances[validator_index]
-            if is_fully_withdrawable_validator(validator, balance, epoch):
-                withdrawals.append(
-                    Withdrawal(
-                        index=withdrawal_index,
-                        validator_index=validator_index,
-                        address=ExecutionAddress(validator.withdrawal_credentials[12:]),
-                        amount=balance,
-                    )
-                )
-                withdrawal_index += WithdrawalIndex(1)
-            elif is_partially_withdrawable_validator(validator, balance):
-                withdrawals.append(
-                    Withdrawal(
-                        index=withdrawal_index,
-                        validator_index=validator_index,
-                        address=ExecutionAddress(validator.withdrawal_credentials[12:]),
-                        amount=balance - MAX_EFFECTIVE_BALANCE,
-                    )
-                )
-                withdrawal_index += WithdrawalIndex(1)
-            if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
-                break
-            validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
-        return withdrawals
+
+        # Get validators sweep withdrawals
+        validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
+            get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
+        )
+        withdrawals.extend(validators_sweep_withdrawals)
+
+        return withdrawals, processed_validators_sweep_count
     </spec>
 - name: get_expected_withdrawals#electra
@@ -2407,80 +2385,26 @@
 - file: beacon-chain/state/state-native/getters_withdrawal.go
   search: func (b *BeaconState) ExpectedWithdrawals(
   spec: |
-    <spec fn="get_expected_withdrawals" fork="electra" hash="060932cd">
-    def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
-        epoch = get_current_epoch(state)
+    <spec fn="get_expected_withdrawals" fork="electra" hash="cfce862b">
+    def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64, uint64]:
         withdrawal_index = state.next_withdrawal_index
-        validator_index = state.next_withdrawal_validator_index
         withdrawals: List[Withdrawal] = []
-        processed_partial_withdrawals_count = 0
 
-        # [New in Electra:EIP7251]
-        # Consume pending partial withdrawals
-        for withdrawal in state.pending_partial_withdrawals:
-            if (
-                withdrawal.withdrawable_epoch > epoch
-                or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP
-            ):
-                break
+        # Get partial withdrawals
+        partial_withdrawals, withdrawal_index, processed_partial_withdrawals_count = (
+            get_pending_partial_withdrawals(state, withdrawal_index, withdrawals)
+        )
+        withdrawals.extend(partial_withdrawals)
 
-            validator = state.validators[withdrawal.validator_index]
-            has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
-            total_withdrawn = sum(
-                w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index
-            )
-            balance = state.balances[withdrawal.validator_index] - total_withdrawn
-            has_excess_balance = balance > MIN_ACTIVATION_BALANCE
-            if (
-                validator.exit_epoch == FAR_FUTURE_EPOCH
-                and has_sufficient_effective_balance
-                and has_excess_balance
-            ):
-                withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount)
-                withdrawals.append(
-                    Withdrawal(
-                        index=withdrawal_index,
-                        validator_index=withdrawal.validator_index,
-                        address=ExecutionAddress(validator.withdrawal_credentials[12:]),
-                        amount=withdrawable_balance,
-                    )
-                )
-                withdrawal_index += WithdrawalIndex(1)
+        # Get validators sweep withdrawals
+        validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
+            get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
+        )
+        withdrawals.extend(validators_sweep_withdrawals)
 
-            processed_partial_withdrawals_count += 1
-
-        # Sweep for remaining.
-        bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
-        for _ in range(bound):
-            validator = state.validators[validator_index]
-            # [Modified in Electra:EIP7251]
-            total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == validator_index)
-            balance = state.balances[validator_index] - total_withdrawn
-            if is_fully_withdrawable_validator(validator, balance, epoch):
-                withdrawals.append(
-                    Withdrawal(
-                        index=withdrawal_index,
-                        validator_index=validator_index,
-                        address=ExecutionAddress(validator.withdrawal_credentials[12:]),
-                        amount=balance,
-                    )
-                )
-                withdrawal_index += WithdrawalIndex(1)
-            elif is_partially_withdrawable_validator(validator, balance):
-                withdrawals.append(
-                    Withdrawal(
-                        index=withdrawal_index,
-                        validator_index=validator_index,
-                        address=ExecutionAddress(validator.withdrawal_credentials[12:]),
-                        # [Modified in Electra:EIP7251]
-                        amount=balance - get_max_effective_balance(validator),
-                    )
-                )
-                withdrawal_index += WithdrawalIndex(1)
-            if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
-                break
-            validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
-        return withdrawals, processed_partial_withdrawals_count
+        # [Modified in Electra:EIP7251]
+        return withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count
     </spec>
 - name: get_filtered_block_tree
@@ -3053,7 +2977,7 @@
 - name: get_proposer_head
   sources: []
   spec: |
-    <spec fn="get_proposer_head" fork="phase0" hash="15d44290">
+    <spec fn="get_proposer_head" fork="phase0" hash="99e8fc05">
     def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root:
         head_block = store.blocks[head_root]
         parent_root = head_block.parent_root
@@ -3084,7 +3008,10 @@
         head_weak = is_head_weak(store, head_root)
 
         # Check that the missing votes are assigned to the parent and not being hoarded.
-        parent_strong = is_parent_strong(store, parent_root)
+        parent_strong = is_parent_strong(store, head_root)
+
+        # Re-org more aggressively if there is a proposer equivocation in the previous slot.
+        proposer_equivocation = is_proposer_equivocation(store, head_root)
 
         if all(
             [
@@ -3100,6 +3027,8 @@
         ):
             # We can re-org the current head by building upon its parent block.
             return parent_root
+        elif all([head_weak, current_time_ok, proposer_equivocation]):
+            return parent_root
         else:
             return head_root
     </spec>
@@ -3117,11 +3046,10 @@
 - name: get_proposer_score
   sources: []
   spec: |
-    <spec fn="get_proposer_score" fork="phase0" hash="164b8de0">
+    <spec fn="get_proposer_score" fork="phase0" hash="2c8d8a27">
     def get_proposer_score(store: Store) -> Gwei:
         justified_checkpoint_state = store.checkpoint_states[store.justified_checkpoint]
-        committee_weight = get_total_active_balance(justified_checkpoint_state) // SLOTS_PER_EPOCH
-        return (committee_weight * PROPOSER_SCORE_BOOST) // 100
+        return compute_proposer_score(justified_checkpoint_state)
     </spec>
 
 - name: get_randao_mix
@@ -3509,26 +3437,10 @@
 - file: beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go
   search: func (f *ForkChoice) Weight(
   spec: |
-    <spec fn="get_weight" fork="phase0" hash="f2e4e8ef">
+    <spec fn="get_weight" fork="phase0" hash="b18bf25c">
     def get_weight(store: Store, root: Root) -> Gwei:
         state = store.checkpoint_states[store.justified_checkpoint]
-        unslashed_and_active_indices = [
-            i
-            for i in get_active_validator_indices(state, get_current_epoch(state))
-            if not state.validators[i].slashed
-        ]
-        attestation_score = Gwei(
-            sum(
-                state.validators[i].effective_balance
-                for i in unslashed_and_active_indices
-                if (
-                    i in store.latest_messages
-                    and i not in store.equivocating_indices
-                    and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot)
-                    == root
-                )
-            )
-        )
+        attestation_score = get_attestation_score(store, root, state)
         if store.proposer_boost_root == Root():
             # Return only attestation score if ``proposer_boost_root`` is not set
             return attestation_score
@@ -3615,7 +3527,7 @@
 - file: beacon-chain/core/transition/state.go
   search: func GenesisBeaconState(
   spec: |
-    <spec fn="initialize_beacon_state_from_eth1" fork="phase0" hash="c69537d6">
+    <spec fn="initialize_beacon_state_from_eth1" fork="phase0" hash="d3a0ddd4">
     def initialize_beacon_state_from_eth1(
         eth1_block_hash: Hash32, eth1_timestamp: uint64, deposits: Sequence[Deposit]
     ) -> BeaconState:
@@ -3627,7 +3539,7 @@
     state = BeaconState(
         genesis_time=eth1_timestamp + GENESIS_DELAY,
         fork=fork,
-        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
+        eth1_data=Eth1Data(deposit_count=uint64(len(deposits)), block_hash=eth1_block_hash),
         latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
         randao_mixes=[eth1_block_hash]
        * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
@@ -4162,10 +4074,11 @@
 - name: is_parent_strong
   sources: []
   spec: |
-    <spec fn="is_parent_strong" fork="phase0" hash="e06641a8">
-    def is_parent_strong(store: Store, parent_root: Root) -> bool:
+    <spec fn="is_parent_strong" fork="phase0" hash="02a3fd0b">
+    def is_parent_strong(store: Store, root: Root) -> bool:
         justified_state = store.checkpoint_states[store.justified_checkpoint]
         parent_threshold = calculate_committee_fraction(justified_state, REORG_PARENT_WEIGHT_THRESHOLD)
+        parent_root = store.blocks[root].parent_root
         parent_weight = get_weight(store, parent_root)
         return parent_weight > parent_threshold
     </spec>
@@ -4683,7 +4596,7 @@
 - file: beacon-chain/blockchain/receive_block.go
   search: func (s *Service) ReceiveBlock(
   spec: |
-    <spec fn="on_block" fork="phase0" hash="aff24b59">
+    <spec fn="on_block" fork="phase0" hash="5f45947a">
     def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
         block = signed_block.message
         # Parent block must be known
@@ -4713,19 +4626,8 @@
         # Add new state for this block to the store
         store.block_states[block_root] = state
 
-        # Add block timeliness to the store
-        seconds_since_genesis = store.time - store.genesis_time
-        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
-        epoch = get_current_store_epoch(store)
-        attestation_threshold_ms = get_attestation_due_ms(epoch)
-        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
-        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
-        store.block_timeliness[hash_tree_root(block)] = is_timely
-
-        # Add proposer score boost if the block is timely and not conflicting with an existing block
-        is_first_block = store.proposer_boost_root == Root()
-        if is_timely and is_first_block:
-            store.proposer_boost_root = hash_tree_root(block)
+        record_block_timeliness(store, block_root)
+        update_proposer_boost_root(store, block_root)
 
         # Update checkpoints in store if necessary
         update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -4739,7 +4641,7 @@
 - file: beacon-chain/blockchain/receive_block.go
   search: func (s *Service) ReceiveBlock(
   spec: |
-    <spec fn="on_block" fork="bellatrix" hash="a3193d92">
+    <spec fn="on_block" fork="bellatrix" hash="e81d01c3">
     def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
         """
         Run ``on_block`` upon receiving a new block.
@@ -4780,19 +4682,8 @@
         # Add new state for this block to the store
         store.block_states[block_root] = state
 
-        # Add block timeliness to the store
-        seconds_since_genesis = store.time - store.genesis_time
-        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
-        epoch = get_current_store_epoch(store)
-        attestation_threshold_ms = get_attestation_due_ms(epoch)
-        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
-        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
-        store.block_timeliness[hash_tree_root(block)] = is_timely
-
-        # Add proposer score boost if the block is timely and not conflicting with an existing block
-        is_first_block = store.proposer_boost_root == Root()
-        if is_timely and is_first_block:
-            store.proposer_boost_root = hash_tree_root(block)
+        record_block_timeliness(store, block_root)
+        update_proposer_boost_root(store, block_root)
 
         # Update checkpoints in store if necessary
         update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -4806,7 +4697,7 @@
 - file: beacon-chain/blockchain/receive_block.go
   search: func (s *Service) ReceiveBlock(
   spec: |
-    <spec fn="on_block" fork="capella" hash="560056ad">
+    <spec fn="on_block" fork="capella" hash="7450531c">
     def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
         """
         Run ``on_block`` upon receiving a new block.
@@ -4839,19 +4730,8 @@
         # Add new state for this block to the store
        store.block_states[block_root] = state
 
-        # Add block timeliness to the store
-        seconds_since_genesis = store.time - store.genesis_time
-        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
-        epoch = get_current_store_epoch(store)
-        attestation_threshold_ms = get_attestation_due_ms(epoch)
-        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
-        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
-        store.block_timeliness[hash_tree_root(block)] = is_timely
-
-        # Add proposer score boost if the block is timely and not conflicting with an existing block
-        is_first_block = store.proposer_boost_root == Root()
-        if is_timely and is_first_block:
-            store.proposer_boost_root = hash_tree_root(block)
+        record_block_timeliness(store, block_root)
+        update_proposer_boost_root(store, block_root)
 
         # Update checkpoints in store if necessary
         update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -4865,7 +4745,7 @@
 - file: beacon-chain/blockchain/receive_block.go
   search: func (s *Service) ReceiveBlock(
   spec: |
-    <spec fn="on_block" fork="deneb" hash="9565acee">
+    <spec fn="on_block" fork="deneb" hash="bbad196e">
     def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
         """
         Run ``on_block`` upon receiving a new block.
@@ -4903,19 +4783,8 @@
         # Add new state for this block to the store
         store.block_states[block_root] = state
 
-        # Add block timeliness to the store
-        seconds_since_genesis = store.time - store.genesis_time
-        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
-        epoch = get_current_store_epoch(store)
-        attestation_threshold_ms = get_attestation_due_ms(epoch)
-        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
-        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
-        store.block_timeliness[hash_tree_root(block)] = is_timely
-
-        # Add proposer score boost if the block is timely and not conflicting with an existing block
-        is_first_block = store.proposer_boost_root == Root()
-        if is_timely and is_first_block:
-            store.proposer_boost_root = hash_tree_root(block)
+        record_block_timeliness(store, block_root)
+        update_proposer_boost_root(store, block_root)
 
         # Update checkpoints in store if necessary
         update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -4929,7 +4798,7 @@
 - file: beacon-chain/blockchain/receive_block.go
   search: func (s *Service) ReceiveBlock(
   spec: |
-    <spec fn="on_block" fork="fulu" hash="4f955de9">
+    <spec fn="on_block" fork="fulu" hash="b8f279b9">
    def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
         """
         Run ``on_block`` upon receiving a new block.
@@ -4967,19 +4836,8 @@
         # Add new state for this block to the store
         store.block_states[block_root] = state
 
-        # Add block timeliness to the store
-        seconds_since_genesis = store.time - store.genesis_time
-        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
-        epoch = get_current_store_epoch(store)
-        attestation_threshold_ms = get_attestation_due_ms(epoch)
-        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
-        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
-        store.block_timeliness[hash_tree_root(block)] = is_timely
-
-        # Add proposer score boost if the block is timely and not conflicting with an existing block
-        is_first_block = store.proposer_boost_root == Root()
-        if is_timely and is_first_block:
-            store.proposer_boost_root = hash_tree_root(block)
+        record_block_timeliness(store, block_root)
+        update_proposer_boost_root(store, block_root)
 
         # Update checkpoints in store if necessary
         update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
@@ -5074,7 +4932,7 @@
 - name: prepare_execution_payload#capella
   sources: []
   spec: |
-    <spec fn="prepare_execution_payload" fork="capella" hash="28db1590">
+    <spec fn="prepare_execution_payload" fork="capella" hash="c258893e">
     def prepare_execution_payload(
         state: BeaconState,
         safe_block_hash: Hash32,
@@ -5087,12 +4945,15 @@
     parent_hash = state.latest_execution_payload_header.block_hash
 
     # Set the forkchoice head and initiate the payload build process
+    # [New in Capella]
+    withdrawals, _ = get_expected_withdrawals(state)
+
     payload_attributes = PayloadAttributes(
         timestamp=compute_time_at_slot(state, state.slot),
         prev_randao=get_randao_mix(state, get_current_epoch(state)),
         suggested_fee_recipient=suggested_fee_recipient,
-        # [New in Capella]
-        withdrawals=get_expected_withdrawals(state),
+        withdrawals=withdrawals,
     )
     return execution_engine.notify_forkchoice_updated(
         head_block_hash=parent_hash,
@@ -5105,7 +4966,7 @@
 - name: prepare_execution_payload#deneb
   sources: []
   spec: |
-    <spec fn="prepare_execution_payload" fork="deneb" hash="f3387ec6">
+    <spec fn="prepare_execution_payload" fork="deneb" hash="59f61f3a">
     def prepare_execution_payload(
         state: BeaconState,
         safe_block_hash: Hash32,
@@ -5117,11 +4978,13 @@
     parent_hash = state.latest_execution_payload_header.block_hash
 
     # Set the forkchoice head and initiate the payload build process
+    withdrawals, _ = get_expected_withdrawals(state)
+
     payload_attributes = PayloadAttributes(
         timestamp=compute_time_at_slot(state, state.slot),
         prev_randao=get_randao_mix(state, get_current_epoch(state)),
         suggested_fee_recipient=suggested_fee_recipient,
-        withdrawals=get_expected_withdrawals(state),
+        withdrawals=withdrawals,
         # [New in Deneb:EIP4788]
         parent_beacon_block_root=hash_tree_root(state.latest_block_header),
     )
@@ -5136,7 +4999,7 @@
 - name: prepare_execution_payload#electra
   sources: []
   spec: |
-    <spec fn="prepare_execution_payload" fork="electra" hash="567b3739">
+    <spec fn="prepare_execution_payload" fork="electra" hash="5414b883">
     def prepare_execution_payload(
         state: BeaconState,
         safe_block_hash: Hash32,
@@ -5149,7 +5012,7 @@
 
     # [Modified in EIP7251]
     # Set the forkchoice head and initiate the payload build process
-    withdrawals, _ = get_expected_withdrawals(state)
+    withdrawals, _, _ = get_expected_withdrawals(state)
 
     payload_attributes = PayloadAttributes(
         timestamp=compute_time_at_slot(state, state.slot),
@@ -5171,7 +5034,7 @@
 - file: beacon-chain/core/blocks/attestation.go
   search: func ProcessAttestationNoVerifySignature(
   spec: |
-    <spec fn="process_attestation" fork="phase0" hash="6ac78cd0">
+    <spec fn="process_attestation" fork="phase0" hash="d8e86aa9">
     def process_attestation(state: BeaconState, attestation: Attestation) -> None:
         data = attestation.data
         assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
@@ -5183,8 +5046,8 @@
     assert len(attestation.aggregation_bits) == len(committee)
 
     pending_attestation = PendingAttestation(
-        data=data,
         aggregation_bits=attestation.aggregation_bits,
+        data=data,
         inclusion_delay=state.slot - data.slot,
         proposer_index=get_beacon_proposer_index(state),
     )
@@ -7208,31 +7071,18 @@
 - file: beacon-chain/core/blocks/withdrawals.go
   search: func ProcessWithdrawals(
   spec: |
-    <spec fn="process_withdrawals" fork="capella" hash="ed6a9c5a">
+    <spec fn="process_withdrawals" fork="capella" hash="901f9fc4">
     def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
-        expected_withdrawals = get_expected_withdrawals(state)
-        assert payload.withdrawals == expected_withdrawals
+        # Get expected withdrawals
+        withdrawals, processed_validators_sweep_count = get_expected_withdrawals(state)
+        assert payload.withdrawals == withdrawals
 
-        for withdrawal in expected_withdrawals:
-            decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
+        # Apply expected withdrawals
+        apply_withdrawals(state, withdrawals)
 
-        # Update the next withdrawal index if this block contained withdrawals
-        if len(expected_withdrawals) != 0:
-            latest_withdrawal = expected_withdrawals[-1]
-            state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
-
-        # Update the next validator index to start the next withdrawal sweep
-        if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
-            # Next sweep starts after the latest withdrawal's validator index
-            next_validator_index = ValidatorIndex(
-                (expected_withdrawals[-1].validator_index + 1) % len(state.validators)
-            )
-            state.next_withdrawal_validator_index = next_validator_index
-        else:
-            # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
-            next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
-            next_validator_index = ValidatorIndex(next_index % len(state.validators))
-            state.next_withdrawal_validator_index = next_validator_index
+        # Update withdrawals fields in the state
+        update_next_withdrawal_index(state, withdrawals)
+        update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
     </spec>
 
 - name: process_withdrawals#electra
@@ -7240,39 +7090,23 @@
 - file: beacon-chain/core/blocks/withdrawals.go
   search: func ProcessWithdrawals(
   spec: |
-    <spec fn="process_withdrawals" fork="electra" hash="dd99a91f">
+    <spec fn="process_withdrawals" fork="electra" hash="67870972">
     def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
-        # [Modified in Electra:EIP7251]
-        expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state)
+        # Get expected withdrawals
+        withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count = (
+            get_expected_withdrawals(state)
+        )
+        assert payload.withdrawals == withdrawals
 
-        assert payload.withdrawals == expected_withdrawals
-
-        for withdrawal in expected_withdrawals:
-            decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
+        # Apply expected withdrawals
+        apply_withdrawals(state, withdrawals)
 
+        # Update withdrawals fields in the state
+        update_next_withdrawal_index(state, withdrawals)
         # [New in Electra:EIP7251]
-        # Update pending partial withdrawals
-        state.pending_partial_withdrawals = state.pending_partial_withdrawals[
-            processed_partial_withdrawals_count:
-        ]
-
-        # Update the next withdrawal index if this block contained withdrawals
-        if len(expected_withdrawals) != 0:
-            latest_withdrawal = expected_withdrawals[-1]
-            state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
-
-        # Update the next validator index to start the next withdrawal sweep
-        if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
-            # Next sweep starts after the latest withdrawal's validator index
-            next_validator_index = ValidatorIndex(
-                (expected_withdrawals[-1].validator_index + 1) % len(state.validators)
-            )
-            state.next_withdrawal_validator_index = next_validator_index
-        else:
-            # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
-            next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
-            next_validator_index = ValidatorIndex(next_index % len(state.validators))
-            state.next_withdrawal_validator_index = next_validator_index
+        update_pending_partial_withdrawals(state, processed_partial_withdrawals_count)
+        update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
     </spec>
 - name: queue_excess_active_balance
@@ -7303,7 +7137,7 @@
 - name: recover_matrix
   sources: []
   spec: |
-    <spec fn="recover_matrix" fork="fulu" hash="9b01f005">
+    <spec fn="recover_matrix" fork="fulu" hash="3db21f50">
     def recover_matrix(
         partial_matrix: Sequence[MatrixEntry], blob_count: uint64
     ) -> Sequence[MatrixEntry]:
@@ -7323,8 +7157,8 @@
             MatrixEntry(
                 cell=cell,
                 kzg_proof=proof,
-                row_index=blob_index,
                 column_index=cell_index,
+                row_index=blob_index,
             )
         )
     return matrix
@@ -7373,7 +7207,7 @@
 - file: beacon-chain/forkchoice/ro.go
   search: func (ro *ROForkChoice) ShouldOverrideFCU(
   spec: |
-    <spec fn="should_override_forkchoice_update" fork="bellatrix" hash="9a8043af">
+    <spec fn="should_override_forkchoice_update" fork="bellatrix" hash="c055d92a">
     def should_override_forkchoice_update(store: Store, head_root: Root) -> bool:
         head_block = store.blocks[head_root]
         parent_root = head_block.parent_root
@@ -7414,7 +7248,7 @@
     # `store.time` early, or by counting queued attestations during the head block's slot.
     if current_slot > head_block.slot:
         head_weak = is_head_weak(store, head_root)
-        parent_strong = is_parent_strong(store, parent_root)
+        parent_strong = is_parent_strong(store, head_root)
     else:
         head_weak = True
        parent_strong = True
@@ -278,7 +278,6 @@ go_test(
         "//testing/spectest/shared/fulu/rewards:go_default_library",
         "//testing/spectest/shared/fulu/sanity:go_default_library",
         "//testing/spectest/shared/fulu/ssz_static:go_default_library",
-        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
         "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
         "//testing/spectest/shared/phase0/finality:go_default_library",
         "//testing/spectest/shared/phase0/operations:go_default_library",
@@ -2,10 +2,9 @@ package mainnet
 
 import (
 	"testing"
-
-	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/ssz_static"
 )
 
 func TestMainnet_Gloas_SSZStatic(t *testing.T) {
-	ssz_static.RunSSZStaticTests(t, "mainnet")
+	t.Skip("Gloas is not implemented")
+	// ssz_static.RunSSZStaticTests(t, "mainnet")
 }
@@ -288,7 +288,6 @@ go_test(
         "//testing/spectest/shared/fulu/rewards:go_default_library",
         "//testing/spectest/shared/fulu/sanity:go_default_library",
         "//testing/spectest/shared/fulu/ssz_static:go_default_library",
-        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
         "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
         "//testing/spectest/shared/phase0/finality:go_default_library",
         "//testing/spectest/shared/phase0/operations:go_default_library",
@@ -2,10 +2,9 @@ package minimal
 
 import (
 	"testing"
-
-	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/ssz_static"
 )
 
 func TestMinimal_Gloas_SSZStatic(t *testing.T) {
-	ssz_static.RunSSZStaticTests(t, "minimal")
+	t.Skip("Gloas is not implemented")
+	// ssz_static.RunSSZStaticTests(t, "minimal")
 }
@@ -62,8 +62,17 @@ func runTest(t *testing.T, config string, fork int, basePath string) { // nolint
 	if len(testFolders) == 0 {
 		t.Fatalf("No test folders found for %s/%s/%s", config, version.String(fork), folderPath)
 	}
 
+	var skipTests = map[string]bool{
+		// Skipping because of #4807 backporting issues
+		"voting_source_beyond_two_epoch":         true,
+		"justified_update_always_if_better":      true,
+		"justified_update_not_realized_finality": true,
+	}
 	for _, folder := range testFolders {
+		if skipTests[folder.Name()] {
+			t.Logf("Skipping test %s due to known issues", folder.Name())
+			continue
+		}
 		t.Run(folder.Name(), func(t *testing.T) {
 			helpers.ClearCache()
 			preStepsFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "steps.yaml")
@@ -10,18 +10,16 @@ import (
 )
 
 func NewChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.ChainClient {
-	grpcClient := grpcApi.NewGrpcChainClient(validatorConn.GetGrpcClientConn())
+	grpcClient := grpcApi.NewGrpcChainClientWithConnection(validatorConn)
 	if features.Get().EnableBeaconRESTApi {
 		return beaconApi.NewBeaconApiChainClientWithFallback(jsonRestHandler, grpcClient)
-	} else {
-		return grpcClient
 	}
+	return grpcClient
 }
 
 func NewPrysmChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.PrysmChainClient {
 	if features.Get().EnableBeaconRESTApi {
 		return beaconApi.NewPrysmChainClient(jsonRestHandler, nodeClientFactory.NewNodeClient(validatorConn, jsonRestHandler))
-	} else {
-		return grpcApi.NewGrpcPrysmChainClient(validatorConn.GetGrpcClientConn())
 	}
+	return grpcApi.NewGrpcPrysmChainClientWithConnection(validatorConn)
 }
@@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "grpc_beacon_chain_client.go",
+        "grpc_client_manager.go",
         "grpc_node_client.go",
         "grpc_prysm_beacon_chain_client.go",
         "grpc_validator_client.go",
@@ -25,6 +26,7 @@ go_library(
         "//proto/eth/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//validator/client/iface:go_default_library",
+        "//validator/helpers:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_golang_protobuf//ptypes/empty",
         "@com_github_pkg_errors//:go_default_library",
@@ -5,38 +5,42 @@ import (
 
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/validator/client/iface"
+	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
 	"github.com/golang/protobuf/ptypes/empty"
-	"google.golang.org/grpc"
 )
 
 type grpcChainClient struct {
-	beaconChainClient ethpb.BeaconChainClient
+	*grpcClientManager[ethpb.BeaconChainClient]
 }
 
 func (c *grpcChainClient) ChainHead(ctx context.Context, in *empty.Empty) (*ethpb.ChainHead, error) {
-	return c.beaconChainClient.GetChainHead(ctx, in)
+	return c.getClient().GetChainHead(ctx, in)
 }
 
 func (c *grpcChainClient) ValidatorBalances(ctx context.Context, in *ethpb.ListValidatorBalancesRequest) (*ethpb.ValidatorBalances, error) {
-	return c.beaconChainClient.ListValidatorBalances(ctx, in)
+	return c.getClient().ListValidatorBalances(ctx, in)
 }
 
 func (c *grpcChainClient) Validators(ctx context.Context, in *ethpb.ListValidatorsRequest) (*ethpb.Validators, error) {
-	return c.beaconChainClient.ListValidators(ctx, in)
+	return c.getClient().ListValidators(ctx, in)
 }
 
 func (c *grpcChainClient) ValidatorQueue(ctx context.Context, in *empty.Empty) (*ethpb.ValidatorQueue, error) {
-	return c.beaconChainClient.GetValidatorQueue(ctx, in)
+	return c.getClient().GetValidatorQueue(ctx, in)
 }
 
 func (c *grpcChainClient) ValidatorPerformance(ctx context.Context, in *ethpb.ValidatorPerformanceRequest) (*ethpb.ValidatorPerformanceResponse, error) {
-	return c.beaconChainClient.GetValidatorPerformance(ctx, in)
+	return c.getClient().GetValidatorPerformance(ctx, in)
 }
 
 func (c *grpcChainClient) ValidatorParticipation(ctx context.Context, in *ethpb.GetValidatorParticipationRequest) (*ethpb.ValidatorParticipationResponse, error) {
-	return c.beaconChainClient.GetValidatorParticipation(ctx, in)
+	return c.getClient().GetValidatorParticipation(ctx, in)
 }
 
-func NewGrpcChainClient(cc grpc.ClientConnInterface) iface.ChainClient {
-	return &grpcChainClient{ethpb.NewBeaconChainClient(cc)}
+// NewGrpcChainClientWithConnection creates a new gRPC chain client that supports
+// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
+func NewGrpcChainClientWithConnection(conn validatorHelpers.NodeConnection) iface.ChainClient {
+	return &grpcChainClient{
+		grpcClientManager: newGrpcClientManager(conn, ethpb.NewBeaconChainClient),
+	}
 }
validator/client/grpc-api/grpc_client_manager.go (new file, 61 lines)
@@ -0,0 +1,61 @@
+package grpc_api
+
+import (
+	"sync"
+
+	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
+	"google.golang.org/grpc"
+)
+
+// grpcClientManager handles dynamic gRPC client recreation when the connection changes.
+// It uses generics to work with any gRPC client type.
+type grpcClientManager[T any] struct {
+	conn      validatorHelpers.NodeConnection
+	client    T
+	lastHost  string
+	clientMu  sync.RWMutex
+	newClient func(grpc.ClientConnInterface) T
+}
+
+// newGrpcClientManager creates a new client manager with the given connection and client constructor.
+func newGrpcClientManager[T any](
+	conn validatorHelpers.NodeConnection,
+	newClient func(grpc.ClientConnInterface) T,
+) *grpcClientManager[T] {
+	m := &grpcClientManager[T]{
+		conn:      conn,
+		newClient: newClient,
+		client:    newClient(conn.GetGrpcClientConn()),
+	}
+	if provider := conn.GetGrpcConnectionProvider(); provider != nil {
+		m.lastHost = provider.CurrentHost()
+	}
+	return m
+}
+
+// getClient returns the current client, recreating it if the connection has changed.
+func (m *grpcClientManager[T]) getClient() T {
+	if m.conn == nil || m.conn.GetGrpcConnectionProvider() == nil {
+		return m.client
+	}
+
+	currentHost := m.conn.GetGrpcConnectionProvider().CurrentHost()
+	m.clientMu.RLock()
+	if m.lastHost == currentHost {
+		client := m.client
+		m.clientMu.RUnlock()
+		return client
+	}
+	m.clientMu.RUnlock()
+
+	// Connection changed, need to recreate client
+	m.clientMu.Lock()
+	defer m.clientMu.Unlock()
+	// Double-check after acquiring write lock
+	if m.lastHost == currentHost {
+		return m.client
+	}
+	m.client = m.newClient(m.conn.GetGrpcClientConn())
+	m.lastHost = currentHost
+	return m.client
+}
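`getClient` is a textbook read-mostly double-checked lock: the hot path takes only `RLock`, and the rare rebuild re-checks the host after acquiring the write lock, because another goroutine may already have recreated the client between the `RUnlock` and the `Lock`. A stripped-down, standalone sketch of the same pattern (hypothetical types, not Prysm code):

```go
package main

import (
	"fmt"
	"sync"
)

// hostProvider stands in for the GrpcConnectionProvider: it reports
// which beacon node host the dialer currently points at.
type hostProvider struct {
	mu   sync.Mutex
	host string
}

func (p *hostProvider) CurrentHost() string {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.host
}

// cachedClient rebuilds a host-derived value only when the host changes.
type cachedClient struct {
	p        *hostProvider
	mu       sync.RWMutex
	lastHost string
	client   string // stand-in for a generated gRPC client
}

func (c *cachedClient) get() string {
	host := c.p.CurrentHost()

	c.mu.RLock()
	if c.lastHost == host { // fast path: connection unchanged
		v := c.client
		c.mu.RUnlock()
		return v
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	if c.lastHost == host { // double-check: someone else rebuilt it
		return c.client
	}
	c.client = "client-for-" + host // stand-in for newClient(conn)
	c.lastHost = host
	return c.client
}

func main() {
	p := &hostProvider{host: "node-a:4000"}
	c := &cachedClient{p: p}
	fmt.Println(c.get()) // client-for-node-a:4000

	p.mu.Lock()
	p.host = "node-b:4000" // simulate gRPC failover to a fallback node
	p.mu.Unlock()

	fmt.Println(c.get()) // client-for-node-b:4000 — rebuilt exactly once
}
```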
@@ -5,8 +5,8 @@ import (
 
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/validator/client/iface"
+	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
 	"github.com/golang/protobuf/ptypes/empty"
-	"google.golang.org/grpc"
 )
 
 var (
@@ -14,35 +14,48 @@ var (
 )
 
 type grpcNodeClient struct {
-	nodeClient ethpb.NodeClient
+	*grpcClientManager[ethpb.NodeClient]
 }
 
 func (c *grpcNodeClient) SyncStatus(ctx context.Context, in *empty.Empty) (*ethpb.SyncStatus, error) {
-	return c.nodeClient.GetSyncStatus(ctx, in)
+	return c.getClient().GetSyncStatus(ctx, in)
 }
 
 func (c *grpcNodeClient) Genesis(ctx context.Context, in *empty.Empty) (*ethpb.Genesis, error) {
-	return c.nodeClient.GetGenesis(ctx, in)
+	return c.getClient().GetGenesis(ctx, in)
 }
 
 func (c *grpcNodeClient) Version(ctx context.Context, in *empty.Empty) (*ethpb.Version, error) {
-	return c.nodeClient.GetVersion(ctx, in)
+	return c.getClient().GetVersion(ctx, in)
 }
 
 func (c *grpcNodeClient) Peers(ctx context.Context, in *empty.Empty) (*ethpb.Peers, error) {
-	return c.nodeClient.ListPeers(ctx, in)
+	return c.getClient().ListPeers(ctx, in)
 }
 
 func (c *grpcNodeClient) IsReady(ctx context.Context) bool {
-	_, err := c.nodeClient.GetHealth(ctx, &ethpb.HealthRequest{})
+	_, err := c.getClient().GetHealth(ctx, &ethpb.HealthRequest{})
 	if err != nil {
-		log.WithError(err).Error("Failed to get health of node")
+		log.WithError(err).Debug("Failed to get health of node")
 		return false
 	}
+	// Then check sync status - we only want fully synced nodes
+	syncStatus, err := c.getClient().GetSyncStatus(ctx, &empty.Empty{})
+	if err != nil {
+		log.WithError(err).Debug("Failed to get sync status of node")
+		return false
+	}
+	if syncStatus.Syncing {
+		log.Debug("Node is syncing, not fully synced")
+		return false
+	}
 	return true
 }
 
-func NewNodeClient(cc grpc.ClientConnInterface) iface.NodeClient {
-	g := &grpcNodeClient{nodeClient: ethpb.NewNodeClient(cc)}
-	return g
+// NewNodeClientWithConnection creates a new gRPC node client that supports
+// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
+func NewNodeClientWithConnection(conn validatorHelpers.NodeConnection) iface.NodeClient {
+	return &grpcNodeClient{
+		grpcClientManager: newGrpcClientManager(conn, ethpb.NewNodeClient),
+	}
 }
@@ -12,9 +12,9 @@ import (
 	eth "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/validator/client/iface"
+	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
 	"github.com/golang/protobuf/ptypes/empty"
 	"github.com/pkg/errors"
-	"google.golang.org/grpc"
 )
 
 type grpcPrysmChainClient struct {
@@ -95,6 +95,8 @@ func (c *grpcPrysmChainClient) ValidatorPerformance(ctx context.Context, in *eth
 	return c.chainClient.ValidatorPerformance(ctx, in)
 }
 
-func NewGrpcPrysmChainClient(cc grpc.ClientConnInterface) iface.PrysmChainClient {
-	return &grpcPrysmChainClient{chainClient: &grpcChainClient{ethpb.NewBeaconChainClient(cc)}}
+// NewGrpcPrysmChainClientWithConnection creates a new gRPC Prysm chain client that supports
+// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
+func NewGrpcPrysmChainClientWithConnection(conn validatorHelpers.NodeConnection) iface.PrysmChainClient {
+	return &grpcPrysmChainClient{chainClient: NewGrpcChainClientWithConnection(conn)}
 }
@@ -14,24 +14,24 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type grpcValidatorClient struct {
|
||||
beaconNodeValidatorClient ethpb.BeaconNodeValidatorClient
|
||||
isEventStreamRunning bool
|
||||
*grpcClientManager[ethpb.BeaconNodeValidatorClient]
|
||||
isEventStreamRunning bool
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.ValidatorDutiesContainer, error) {
    if features.Get().DisableDutiesV2 {
        return c.getDuties(ctx, in)
    }
    dutiesResponse, err := c.beaconNodeValidatorClient.GetDutiesV2(ctx, in)
    dutiesResponse, err := c.getClient().GetDutiesV2(ctx, in)
    if err != nil {
        if status.Code(err) == codes.Unimplemented {
            log.Warn("GetDutiesV2 returned status code Unimplemented, falling back to GetDuties")
@@ -47,7 +47,7 @@ func (c *grpcValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesReques

// getDuties calls the v1 GetDuties endpoint.
func (c *grpcValidatorClient) getDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.ValidatorDutiesContainer, error) {
    dutiesResponse, err := c.beaconNodeValidatorClient.GetDuties(ctx, in)
    dutiesResponse, err := c.getClient().GetDuties(ctx, in)
    if err != nil {
        return nil, errors.Wrap(
            client.ErrConnectionIssue,
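Editor's note: Duties above is the one call in this file with a version fallback, trying GetDutiesV2 first and dropping to GetDuties only when the server reports codes.Unimplemented. The shape of that check, pulled out into a standalone helper for illustration (callWithFallback is not part of the changeset):

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// callWithFallback runs the v2 call and falls back to v1 only when the
// server reports the method as unimplemented; any other error passes
// through unchanged so real failures are not masked.
func callWithFallback[T any](v2, v1 func() (T, error)) (T, error) {
    out, err := v2()
    if err != nil && status.Code(err) == codes.Unimplemented {
        return v1()
    }
    return out, err
}

func main() {
    v2 := func() (string, error) { return "", status.Error(codes.Unimplemented, "no v2") }
    v1 := func() (string, error) { return "duties-v1", nil }
    got, err := callWithFallback(v2, v1)
    fmt.Println(got, err) // duties-v1 <nil>
}
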
@@ -147,108 +147,108 @@ func toValidatorDutyV2(duty *ethpb.DutiesV2Response_Duty) (*ethpb.ValidatorDuty,
}

func (c *grpcValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
    return c.beaconNodeValidatorClient.CheckDoppelGanger(ctx, in)
    return c.getClient().CheckDoppelGanger(ctx, in)
}

func (c *grpcValidatorClient) DomainData(ctx context.Context, in *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
    return c.beaconNodeValidatorClient.DomainData(ctx, in)
    return c.getClient().DomainData(ctx, in)
}

func (c *grpcValidatorClient) AttestationData(ctx context.Context, in *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
    return c.beaconNodeValidatorClient.GetAttestationData(ctx, in)
    return c.getClient().GetAttestationData(ctx, in)
}

func (c *grpcValidatorClient) BeaconBlock(ctx context.Context, in *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
    return c.beaconNodeValidatorClient.GetBeaconBlock(ctx, in)
    return c.getClient().GetBeaconBlock(ctx, in)
}

func (c *grpcValidatorClient) FeeRecipientByPubKey(ctx context.Context, in *ethpb.FeeRecipientByPubKeyRequest) (*ethpb.FeeRecipientByPubKeyResponse, error) {
    return c.beaconNodeValidatorClient.GetFeeRecipientByPubKey(ctx, in)
    return c.getClient().GetFeeRecipientByPubKey(ctx, in)
}

func (c *grpcValidatorClient) SyncCommitteeContribution(ctx context.Context, in *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
    return c.beaconNodeValidatorClient.GetSyncCommitteeContribution(ctx, in)
    return c.getClient().GetSyncCommitteeContribution(ctx, in)
}

func (c *grpcValidatorClient) SyncMessageBlockRoot(ctx context.Context, in *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
    return c.beaconNodeValidatorClient.GetSyncMessageBlockRoot(ctx, in)
    return c.getClient().GetSyncMessageBlockRoot(ctx, in)
}

func (c *grpcValidatorClient) SyncSubcommitteeIndex(ctx context.Context, in *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
    return c.beaconNodeValidatorClient.GetSyncSubcommitteeIndex(ctx, in)
    return c.getClient().GetSyncSubcommitteeIndex(ctx, in)
}

func (c *grpcValidatorClient) MultipleValidatorStatus(ctx context.Context, in *ethpb.MultipleValidatorStatusRequest) (*ethpb.MultipleValidatorStatusResponse, error) {
    return c.beaconNodeValidatorClient.MultipleValidatorStatus(ctx, in)
    return c.getClient().MultipleValidatorStatus(ctx, in)
}

func (c *grpcValidatorClient) PrepareBeaconProposer(ctx context.Context, in *ethpb.PrepareBeaconProposerRequest) (*empty.Empty, error) {
    return c.beaconNodeValidatorClient.PrepareBeaconProposer(ctx, in)
    return c.getClient().PrepareBeaconProposer(ctx, in)
}

func (c *grpcValidatorClient) ProposeAttestation(ctx context.Context, in *ethpb.Attestation) (*ethpb.AttestResponse, error) {
    return c.beaconNodeValidatorClient.ProposeAttestation(ctx, in)
    return c.getClient().ProposeAttestation(ctx, in)
}

func (c *grpcValidatorClient) ProposeAttestationElectra(ctx context.Context, in *ethpb.SingleAttestation) (*ethpb.AttestResponse, error) {
    return c.beaconNodeValidatorClient.ProposeAttestationElectra(ctx, in)
    return c.getClient().ProposeAttestationElectra(ctx, in)
}

func (c *grpcValidatorClient) ProposeBeaconBlock(ctx context.Context, in *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
    return c.beaconNodeValidatorClient.ProposeBeaconBlock(ctx, in)
    return c.getClient().ProposeBeaconBlock(ctx, in)
}

func (c *grpcValidatorClient) ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error) {
    return c.beaconNodeValidatorClient.ProposeExit(ctx, in)
    return c.getClient().ProposeExit(ctx, in)
}

func (c *grpcValidatorClient) StreamBlocksAltair(ctx context.Context, in *ethpb.StreamBlocksRequest) (ethpb.BeaconNodeValidator_StreamBlocksAltairClient, error) {
    return c.beaconNodeValidatorClient.StreamBlocksAltair(ctx, in)
    return c.getClient().StreamBlocksAltair(ctx, in)
}

func (c *grpcValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, _ primitives.ValidatorIndex, _ uint64) (*ethpb.AggregateSelectionResponse, error) {
    return c.beaconNodeValidatorClient.SubmitAggregateSelectionProof(ctx, in)
    return c.getClient().SubmitAggregateSelectionProof(ctx, in)
}

func (c *grpcValidatorClient) SubmitAggregateSelectionProofElectra(ctx context.Context, in *ethpb.AggregateSelectionRequest, _ primitives.ValidatorIndex, _ uint64) (*ethpb.AggregateSelectionElectraResponse, error) {
    return c.beaconNodeValidatorClient.SubmitAggregateSelectionProofElectra(ctx, in)
    return c.getClient().SubmitAggregateSelectionProofElectra(ctx, in)
}

func (c *grpcValidatorClient) SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
    return c.beaconNodeValidatorClient.SubmitSignedAggregateSelectionProof(ctx, in)
    return c.getClient().SubmitSignedAggregateSelectionProof(ctx, in)
}

func (c *grpcValidatorClient) SubmitSignedAggregateSelectionProofElectra(ctx context.Context, in *ethpb.SignedAggregateSubmitElectraRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
    return c.beaconNodeValidatorClient.SubmitSignedAggregateSelectionProofElectra(ctx, in)
    return c.getClient().SubmitSignedAggregateSelectionProofElectra(ctx, in)
}

func (c *grpcValidatorClient) SubmitSignedContributionAndProof(ctx context.Context, in *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
    return c.beaconNodeValidatorClient.SubmitSignedContributionAndProof(ctx, in)
    return c.getClient().SubmitSignedContributionAndProof(ctx, in)
}

func (c *grpcValidatorClient) SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
    return c.beaconNodeValidatorClient.SubmitSyncMessage(ctx, in)
    return c.getClient().SubmitSyncMessage(ctx, in)
}

func (c *grpcValidatorClient) SubmitValidatorRegistrations(ctx context.Context, in *ethpb.SignedValidatorRegistrationsV1) (*empty.Empty, error) {
    return c.beaconNodeValidatorClient.SubmitValidatorRegistrations(ctx, in)
    return c.getClient().SubmitValidatorRegistrations(ctx, in)
}

func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*empty.Empty, error) {
    return c.beaconNodeValidatorClient.SubscribeCommitteeSubnets(ctx, in)
    return c.getClient().SubscribeCommitteeSubnets(ctx, in)
}

func (c *grpcValidatorClient) ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error) {
    return c.beaconNodeValidatorClient.ValidatorIndex(ctx, in)
    return c.getClient().ValidatorIndex(ctx, in)
}

func (c *grpcValidatorClient) ValidatorStatus(ctx context.Context, in *ethpb.ValidatorStatusRequest) (*ethpb.ValidatorStatusResponse, error) {
    return c.beaconNodeValidatorClient.ValidatorStatus(ctx, in)
    return c.getClient().ValidatorStatus(ctx, in)
}

// Deprecated: Do not use.
func (c *grpcValidatorClient) WaitForChainStart(ctx context.Context, in *empty.Empty) (*ethpb.ChainStartResponse, error) {
    stream, err := c.beaconNodeValidatorClient.WaitForChainStart(ctx, in)
    stream, err := c.getClient().WaitForChainStart(ctx, in)
    if err != nil {
        return nil, errors.Wrap(
            client.ErrConnectionIssue,
@@ -260,13 +260,13 @@ func (c *grpcValidatorClient) WaitForChainStart(ctx context.Context, in *empty.E
}

func (c *grpcValidatorClient) AssignValidatorToSubnet(ctx context.Context, in *ethpb.AssignValidatorToSubnetRequest) (*empty.Empty, error) {
    return c.beaconNodeValidatorClient.AssignValidatorToSubnet(ctx, in)
    return c.getClient().AssignValidatorToSubnet(ctx, in)
}
func (c *grpcValidatorClient) AggregatedSigAndAggregationBits(
    ctx context.Context,
    in *ethpb.AggregatedSigAndAggregationBitsRequest,
) (*ethpb.AggregatedSigAndAggregationBitsResponse, error) {
    return c.beaconNodeValidatorClient.AggregatedSigAndAggregationBits(ctx, in)
    return c.getClient().AggregatedSigAndAggregationBits(ctx, in)
}

func (*grpcValidatorClient) AggregatedSelections(context.Context, []iface.BeaconCommitteeSelection) ([]iface.BeaconCommitteeSelection, error) {
@@ -277,8 +277,12 @@ func (*grpcValidatorClient) AggregatedSyncSelections(context.Context, []iface.Sy
    return nil, iface.ErrNotSupported
}

func NewGrpcValidatorClient(cc grpc.ClientConnInterface) iface.ValidatorClient {
    return &grpcValidatorClient{ethpb.NewBeaconNodeValidatorClient(cc), false}
// NewGrpcValidatorClientWithConnection creates a new gRPC validator client that supports
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
func NewGrpcValidatorClientWithConnection(conn validatorHelpers.NodeConnection) iface.ValidatorClient {
    return &grpcValidatorClient{
        grpcClientManager: newGrpcClientManager(conn, ethpb.NewBeaconNodeValidatorClient),
    }
}

func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []string, eventsChannel chan<- *eventClient.Event) {
@@ -308,7 +312,7 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
        log.Warn("gRPC only supports the head topic, other topics will be ignored")
    }

    stream, err := c.beaconNodeValidatorClient.StreamSlots(ctx, &ethpb.StreamSlotsRequest{VerifiedOnly: true})
    stream, err := c.getClient().StreamSlots(ctx, &ethpb.StreamSlotsRequest{VerifiedOnly: true})
    if err != nil {
        eventsChannel <- &eventClient.Event{
            EventType: eventClient.EventConnectionError,
@@ -374,11 +378,29 @@ func (c *grpcValidatorClient) EventStreamIsRunning() bool {
    return c.isEventStreamRunning
}

func (*grpcValidatorClient) Host() string {
    log.Warn(iface.ErrNotSupported)
    return ""
func (c *grpcValidatorClient) Host() string {
    if c.grpcClientManager == nil || c.grpcClientManager.conn == nil || c.grpcClientManager.conn.GetGrpcConnectionProvider() == nil {
        return ""
    }
    return c.grpcClientManager.conn.GetGrpcConnectionProvider().CurrentHost()
}

func (*grpcValidatorClient) SetHost(_ string) {
    log.Warn(iface.ErrNotSupported)
func (c *grpcValidatorClient) SetHost(host string) {
    if c.grpcClientManager == nil || c.grpcClientManager.conn == nil {
        return
    }
    provider := c.grpcClientManager.conn.GetGrpcConnectionProvider()
    if provider == nil {
        return
    }
    // Find the index of the requested host and switch to it
    for i, h := range provider.Hosts() {
        if h == host {
            if err := provider.SetHost(i); err != nil {
                log.WithError(err).WithField("host", host).Error("Failed to set gRPC host")
            }
            return
        }
    }
    log.WithField("host", host).Warn("Requested gRPC host not found in configured endpoints")
}

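Editor's note: together, the new Host and SetHost turn what used to be unsupported stubs into a thin facade over the connection provider. A hypothetical round-trip, assuming conn is a validatorHelpers.NodeConnection built with WithGrpcConnectionProvider over two endpoints (a sketch, not committed code):

vc := NewGrpcValidatorClientWithConnection(conn)
fmt.Println(vc.Host()) // "host1:4000", delegated to provider.CurrentHost()

vc.SetHost("host2:4000") // linear scan over provider.Hosts()
fmt.Println(vc.Host())   // "host2:4000"

vc.SetHost("nope:4000") // unknown host: logs a warning, keeps the current endpoint
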
@@ -133,7 +133,12 @@ func TestWaitForChainStart_StreamSetupFails(t *testing.T) {
        gomock.Any(),
    ).Return(nil, errors.New("failed stream"))

    validatorClient := &grpcValidatorClient{beaconNodeValidatorClient, true}
    validatorClient := &grpcValidatorClient{
        grpcClientManager: &grpcClientManager[eth.BeaconNodeValidatorClient]{
            client: beaconNodeValidatorClient,
        },
        isEventStreamRunning: true,
    }
    _, err := validatorClient.WaitForChainStart(t.Context(), &emptypb.Empty{})
    want := "could not setup beacon chain ChainStart streaming client"
    assert.ErrorContains(t, want, err)
@@ -146,7 +151,12 @@ func TestStartEventStream(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()
    beaconNodeValidatorClient := mock2.NewMockBeaconNodeValidatorClient(ctrl)
    grpcClient := &grpcValidatorClient{beaconNodeValidatorClient, true}
    grpcClient := &grpcValidatorClient{
        grpcClientManager: &grpcClientManager[eth.BeaconNodeValidatorClient]{
            client: beaconNodeValidatorClient,
        },
        isEventStreamRunning: true,
    }
    tests := []struct {
        name   string
        topics []string

@@ -9,10 +9,9 @@ import (
)

func NewNodeClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.NodeClient {
    grpcClient := grpcApi.NewNodeClient(validatorConn.GetGrpcClientConn())
    grpcClient := grpcApi.NewNodeClientWithConnection(validatorConn)
    if features.Get().EnableBeaconRESTApi {
        return beaconApi.NewNodeClientWithFallback(jsonRestHandler, grpcClient)
    } else {
        return grpcClient
    }
    return grpcClient
}

@@ -134,18 +134,34 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e

    s.ctx = grpcutil.AppendHeaders(ctx, cfg.GRPCHeaders)

    grpcConn, err := grpc.DialContext(ctx, cfg.BeaconNodeGRPCEndpoint, dialOpts...)
    if err != nil {
        return s, err
    var grpcConn *grpc.ClientConn
    var grpcProvider validatorHelpers.GrpcConnectionProvider

    if cfg.BeaconNodeGRPCEndpoint != "" {
        var err error
        grpcProvider, err = validatorHelpers.NewGrpcConnectionProvider(ctx, cfg.BeaconNodeGRPCEndpoint, dialOpts)
        if err != nil {
            return s, errors.Wrap(err, "failed to create gRPC connection provider")
        }
        grpcConn = grpcProvider.CurrentConn()
    }

    if cfg.BeaconNodeCert != "" {
        log.Info("Established secure gRPC connection")
    }

    connOpts := []validatorHelpers.NodeConnectionOption{
        validatorHelpers.WithBeaconApiHeaders(cfg.BeaconApiHeaders),
        validatorHelpers.WithBeaconApiTimeout(cfg.BeaconApiTimeout),
    }
    if grpcProvider != nil {
        connOpts = append(connOpts, validatorHelpers.WithGrpcConnectionProvider(grpcProvider))
    }

    s.conn = validatorHelpers.NewNodeConnection(
        grpcConn,
        cfg.BeaconApiEndpoint,
        validatorHelpers.WithBeaconApiHeaders(cfg.BeaconApiHeaders),
        validatorHelpers.WithBeaconApiTimeout(cfg.BeaconApiTimeout),
        connOpts...,
    )

    return s, nil
@@ -210,6 +226,7 @@ func (v *ValidatorService) Start() {
        graffitiOrderedIndex:   graffitiOrderedIndex,
        beaconNodeHosts:        hosts,
        currentHostIndex:       0,
        grpcConnectionProvider: v.conn.GetGrpcConnectionProvider(),
        validatorClient:        validatorClient,
        chainClient:            beaconChainClientFactory.NewChainClient(v.conn, restHandler),
        nodeClient:             nodeclientfactory.NewNodeClient(v.conn, restHandler),

@@ -15,7 +15,6 @@ func NewValidatorClient(
) iface.ValidatorClient {
    if features.Get().EnableBeaconRESTApi {
        return beaconApi.NewBeaconApiValidatorClient(jsonRestHandler, opt...)
    } else {
        return grpcApi.NewGrpcValidatorClient(validatorConn.GetGrpcClientConn())
    }
    return grpcApi.NewGrpcValidatorClientWithConnection(validatorConn)
}

@@ -38,6 +38,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/validator/db"
    dbCommon "github.com/OffchainLabs/prysm/v7/validator/db/common"
    "github.com/OffchainLabs/prysm/v7/validator/graffiti"
    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "github.com/OffchainLabs/prysm/v7/validator/keymanager"
    "github.com/OffchainLabs/prysm/v7/validator/keymanager/local"
    remoteweb3signer "github.com/OffchainLabs/prysm/v7/validator/keymanager/remote-web3signer"
@@ -82,6 +83,7 @@ type validator struct {
    graffitiOrderedIndex   uint64
    beaconNodeHosts        []string
    currentHostIndex       uint64
    grpcConnectionProvider validatorHelpers.GrpcConnectionProvider
    validatorClient        iface.ValidatorClient
    chainClient            iface.ChainClient
    nodeClient             iface.NodeClient
@@ -1261,15 +1263,35 @@ func (v *validator) Host() string {
}

func (v *validator) changeHost() {
    next := (v.currentHostIndex + 1) % uint64(len(v.beaconNodeHosts))
    hosts := v.hosts()
    if len(hosts) <= 1 {
        return
    }
    next := (v.currentHostIndex + 1) % uint64(len(hosts))
    log.WithFields(logrus.Fields{
        "currentHost": v.beaconNodeHosts[v.currentHostIndex],
        "nextHost":    v.beaconNodeHosts[next],
        "currentHost": hosts[v.currentHostIndex],
        "nextHost":    hosts[next],
    }).Warn("Beacon node is not responding, switching host")
    v.validatorClient.SetHost(v.beaconNodeHosts[next])
    v.validatorClient.SetHost(hosts[next])
    v.currentHostIndex = next
}

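Editor's note: the modulo step in changeHost cycles deterministically through the configured hosts. A standalone demo of the index arithmetic (illustrative only, not Prysm code):

package main

import "fmt"

// With three hosts the sequence wraps 0 -> 1 -> 2 -> 0, exactly as
// (currentHostIndex + 1) % len(hosts) does in changeHost above.
func main() {
    hosts := []string{"node-a:4000", "node-b:4000", "node-c:4000"}
    idx := uint64(0)
    for i := 0; i < 4; i++ {
        idx = (idx + 1) % uint64(len(hosts))
        fmt.Println(hosts[idx]) // node-b, node-c, node-a, node-b
    }
}
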
// hosts returns the list of configured beacon node hosts for failover.
func (v *validator) hosts() []string {
    if features.Get().EnableBeaconRESTApi {
        return v.beaconNodeHosts
    }
    if v.grpcConnectionProvider != nil {
        return v.grpcConnectionProvider.Hosts()
    }
    return nil
}

// numHosts returns the number of configured beacon node hosts for failover.
func (v *validator) numHosts() int {
    return len(v.hosts())
}

func (v *validator) FindHealthyHost(ctx context.Context) bool {
    // Tail-recursive closure keeps retry count private.
    var check func(remaining int) bool
@@ -1277,18 +1299,20 @@ func (v *validator) FindHealthyHost(ctx context.Context) bool {
        if v.nodeClient.IsReady(ctx) { // ready → done
            return true
        }
        if len(v.beaconNodeHosts) == 1 && features.Get().EnableBeaconRESTApi {
            log.WithField("host", v.Host()).Warn("Beacon node is not responding, no backup node configured")
            return false
        log.WithField("host", v.Host()).Debug("Beacon node not fully synced")

        // Try next host if not the last iteration
        if i < numHosts-1 {
            v.changeHost()
        }
        if remaining == 0 || !features.Get().EnableBeaconRESTApi {
            return false // exhausted or REST disabled
        }
        v.changeHost()
        return check(remaining - 1) // recurse
    }

    return check(len(v.beaconNodeHosts))
    if numHosts == 1 {
        log.WithField("host", v.Host()).Warn("Beacon node is not fully synced, no backup node configured")
    } else {
        log.Warn("No fully synced beacon node found")
    }
    return false
}

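Editor's note: the hunk above interleaves the removed loop-based implementation (the lines referencing i and numHosts-1) with the added tail-recursive closure, so it reads awkwardly on its own. Under that reading, an iterative equivalent of the new logic would be the following sketch, where isReady, changeHost, and restAPI stand in for v.nodeClient.IsReady, v.changeHost, and features.Get().EnableBeaconRESTApi; this is not the committed code:

package main

import "fmt"

// findHealthy probes until a node reports ready, switching hosts and
// spending one unit of the retry budget per failed probe.
func findHealthy(remaining int, restAPI bool, isReady func() bool, changeHost func()) bool {
    for {
        if isReady() {
            return true
        }
        if remaining == 0 || !restAPI {
            return false // exhausted or REST disabled
        }
        changeHost()
        remaining--
    }
}

func main() {
    calls := 0
    ready := func() bool { calls++; return calls > 2 } // healthy on the 3rd probe
    fmt.Println(findHealthy(3, true, ready, func() {})) // true
}
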
func (v *validator) filterAndCacheActiveKeys(ctx context.Context, pubkeys [][fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) ([][fieldparams.BLSPubkeyLength]byte, error) {

@@ -2792,6 +2792,10 @@ func TestValidator_Host(t *testing.T) {
}

func TestValidator_ChangeHost(t *testing.T) {
    // Enable REST API mode for this test since changeHost only calls SetHost in REST API mode
    resetCfg := features.InitWithReset(&features.Flags{EnableBeaconRESTApi: true})
    defer resetCfg()

    ctrl := gomock.NewController(t)
    defer ctrl.Finish()


@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "converts.go",
        "grpc_connection_provider.go",
        "metadata.go",
        "node_connection.go",
    ],
@@ -15,6 +16,7 @@ go_library(
        "//validator/db/iface:go_default_library",
        "//validator/slashing-protection-history/format:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
    ],
)

181
validator/helpers/grpc_connection_provider.go
Normal file

@@ -0,0 +1,181 @@
package helpers

import (
    "context"
    "errors"
    "strings"
    "sync"
    "sync/atomic"

    pkgErrors "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
)

var log = logrus.WithField("prefix", "helpers")

// GrpcConnectionProvider manages multiple gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
type GrpcConnectionProvider interface {
    // CurrentConn returns the currently active gRPC connection.
    // Returns nil if the provider has been closed.
    CurrentConn() *grpc.ClientConn
    // CurrentHost returns the address of the currently active endpoint.
    CurrentHost() string
    // Hosts returns all configured endpoint addresses.
    Hosts() []string
    // Conn returns the connection at the given index.
    Conn(index int) *grpc.ClientConn
    // SetHost switches to the endpoint at the given index.
    SetHost(index int) error
    // NextHost switches to the next available endpoint in round-robin fashion.
    NextHost()
    // Close closes all managed connections.
    Close() error
}

type grpcConnectionProvider struct {
    // Immutable after construction - no lock needed for reads
    endpoints   []string
    connections []*grpc.ClientConn

    // Atomic index for lock-free current endpoint access
    currentIndex atomic.Uint64

    // Mutex only for Close() and write operations that need log consistency
    mu     sync.Mutex
    closed atomic.Bool
}

// NewGrpcConnectionProvider creates a new connection provider that manages multiple gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// It creates a separate connection for each endpoint using the provided dial options.
func NewGrpcConnectionProvider(
    ctx context.Context,
    endpoint string,
    dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, pkgErrors.New("no gRPC endpoints provided")
    }

    connections := make([]*grpc.ClientConn, 0, len(endpoints))
    for _, ep := range endpoints {
        conn, err := grpc.DialContext(ctx, ep, dialOpts...)
        if err != nil {
            // Clean up already created connections
            for _, c := range connections {
                if err := c.Close(); err != nil {
                    log.WithError(err).Warn("Failed to close connection during cleanup")
                }
            }
            return nil, pkgErrors.Wrapf(err, "failed to connect to gRPC endpoint %s", ep)
        }
        connections = append(connections, conn)
    }

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized gRPC connection provider with multiple endpoints")

    return &grpcConnectionProvider{
        endpoints:   endpoints,
        connections: connections,
    }, nil
}

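Editor's note: typical construction, assuming the comma-separated value comes from the validator's existing beacon node gRPC endpoint flag; insecure transport credentials are used purely to keep the sketch short:

package main

import (
    "context"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
)

func main() {
    // Two endpoints from one comma-separated value; the provider dials
    // each of them up front and starts on the first.
    provider, err := validatorHelpers.NewGrpcConnectionProvider(
        context.Background(),
        "host1:4000, host2:4000",
        []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())},
    )
    if err != nil {
        panic(err) // dials are lazy, so this normally means a malformed address
    }
    defer provider.Close()

    _ = provider.CurrentConn() // connection for host1:4000 until SetHost/NextHost
}
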
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

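Editor's note: a hypothetical test, runnable inside the helpers package, that pins down the trimming and empty-item behavior of parseEndpoints (it is not part of the changeset):

package helpers

import (
    "reflect"
    "testing"
)

// TestParseEndpoints checks that surrounding whitespace is trimmed and
// that empty items (double commas, trailing commas) are skipped.
func TestParseEndpoints(t *testing.T) {
    got := parseEndpoints(" host1:4000, host2:4000,, ")
    want := []string{"host1:4000", "host2:4000"}
    if !reflect.DeepEqual(got, want) {
        t.Fatalf("got %v, want %v", got, want)
    }
    if parseEndpoints("") != nil {
        t.Fatal("empty input should yield nil")
    }
}
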
func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
    if p.closed.Load() {
        return nil
    }
    idx := p.currentIndex.Load() % uint64(len(p.connections))
    return p.connections[idx]
}

func (p *grpcConnectionProvider) CurrentHost() string {
    idx := p.currentIndex.Load() % uint64(len(p.endpoints))
    return p.endpoints[idx]
}

func (p *grpcConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *grpcConnectionProvider) Conn(index int) *grpc.ClientConn {
    if p.closed.Load() {
        return nil
    }
    if index < 0 || index >= len(p.connections) {
        return nil
    }
    return p.connections[index]
}

func (p *grpcConnectionProvider) SetHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    oldIdx := p.currentIndex.Load()
    p.currentIndex.Store(uint64(index))

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx%uint64(len(p.endpoints))],
        "newHost":      p.endpoints[index],
    }).Info("Switched gRPC endpoint")
    return nil
}

func (p *grpcConnectionProvider) NextHost() {
    p.mu.Lock()
    defer p.mu.Unlock()

    oldIdx := p.currentIndex.Load()
    newIdx := (oldIdx + 1) % uint64(len(p.endpoints))
    p.currentIndex.Store(newIdx)

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx],
        "newHost":      p.endpoints[newIdx],
    }).Debug("Switched to next gRPC endpoint")
}

func (p *grpcConnectionProvider) Close() error {
    p.mu.Lock()
    defer p.mu.Unlock()

    if p.closed.Load() {
        return nil
    }
    p.closed.Store(true)

    var errs []error
    for i, conn := range p.connections {
        if err := conn.Close(); err != nil {
            errs = append(errs, pkgErrors.Wrapf(err, "failed to close connection to %s", p.endpoints[i]))
        }
    }
    return errors.Join(errs...)
}

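Editor's note: Close wraps each per-connection failure with its endpoint via pkgErrors.Wrapf and then collapses the slice with the standard library's errors.Join, which returns nil for an empty list, so Close reports success when every connection closed cleanly. A small standalone illustration:

package main

import (
    "errors"
    "fmt"

    pkgErrors "github.com/pkg/errors"
)

func main() {
    // One simulated close failure, wrapped with its endpoint as in Close above.
    errs := []error{
        pkgErrors.Wrapf(errors.New("transport closing"), "failed to close connection to %s", "host2:4000"),
    }
    fmt.Println(errors.Join(errs...))
    // failed to close connection to host2:4000: transport closing

    fmt.Println(errors.Join() == nil) // true: no failures means a nil error
}
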
@@ -14,14 +14,19 @@ type NodeConnection interface {
    setBeaconApiHeaders(map[string][]string)
    GetBeaconApiTimeout() time.Duration
    setBeaconApiTimeout(time.Duration)
    // GetGrpcConnectionProvider returns the gRPC connection provider for multi-endpoint support.
    // Returns nil if no provider is configured (single endpoint mode).
    GetGrpcConnectionProvider() GrpcConnectionProvider
    setGrpcConnectionProvider(GrpcConnectionProvider)
    dummy()
}

type nodeConnection struct {
    grpcClientConn   *grpc.ClientConn
    beaconApiUrl     string
    beaconApiHeaders map[string][]string
    beaconApiTimeout time.Duration
    grpcClientConn         *grpc.ClientConn
    grpcConnectionProvider GrpcConnectionProvider
    beaconApiUrl           string
    beaconApiHeaders       map[string][]string
    beaconApiTimeout       time.Duration
}

// NodeConnectionOption is a functional option for configuring the node connection.
@@ -41,7 +46,18 @@ func WithBeaconApiTimeout(timeout time.Duration) NodeConnectionOption {
    }
}

// WithGrpcConnectionProvider sets the gRPC connection provider for multi-endpoint support.
func WithGrpcConnectionProvider(provider GrpcConnectionProvider) NodeConnectionOption {
    return func(nc NodeConnection) {
        nc.setGrpcConnectionProvider(provider)
    }
}

func (c *nodeConnection) GetGrpcClientConn() *grpc.ClientConn {
    // If a connection provider is configured, use its current connection
    if c.grpcConnectionProvider != nil {
        return c.grpcConnectionProvider.CurrentConn()
    }
    return c.grpcClientConn
}

@@ -65,6 +81,14 @@ func (c *nodeConnection) setBeaconApiTimeout(timeout time.Duration) {
    c.beaconApiTimeout = timeout
}

func (c *nodeConnection) GetGrpcConnectionProvider() GrpcConnectionProvider {
    return c.grpcConnectionProvider
}

func (c *nodeConnection) setGrpcConnectionProvider(provider GrpcConnectionProvider) {
    c.grpcConnectionProvider = provider
}

func (*nodeConnection) dummy() {}

func NewNodeConnection(grpcConn *grpc.ClientConn, beaconApiUrl string, opts ...NodeConnectionOption) NodeConnection {