Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-02-10 21:14:58 -05:00)

Compare commits (12 commits): feat/proce... → GetVersion
| SHA1 |
|---|
| 3b0f34a79f |
| bb0f70ad60 |
| dc66f8872d |
| db2bb5505c |
| 14f01bbc6c |
| c3e74e4a5d |
| e7ae6a004b |
| 862fb2eb4a |
| bb80a9c832 |
| c1b668a50a |
| fab687d96d |
| cf94ccbf72 |
.ethspecify.yml

```diff
@@ -1,25 +1,39 @@
-version: v1.7.0-alpha.1
+version: v1.7.0-alpha.2
 style: full
 
 specrefs:
-  search_root: ..
+  search_root: .
+  auto_standardize_names: true
+  auto_add_missing_entries: true
+  require_exceptions_have_fork: true
   files:
-    - configs.yml
-    - constants.yml
-    - containers.yml
-    - dataclasses.yml
-    - functions.yml
-    - presets.yml
+    - specrefs/configs.yml
+    - specrefs/constants.yml
+    - specrefs/containers.yml
+    - specrefs/dataclasses.yml
+    - specrefs/functions.yml
+    - specrefs/presets.yml
 
 exceptions:
   presets:
-    # Not implemented: gloas (future fork)
+    # gloas
    - BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
    - MAX_PAYLOAD_ATTESTATIONS#gloas
    - PTC_SIZE#gloas
 
  constants:
-    # Constants in the KZG library
+    # phase0
+    - BASIS_POINTS#phase0
+    - ENDIANNESS#phase0
+    - MAX_CONCURRENT_REQUESTS#phase0
+    - UINT64_MAX#phase0
+    - UINT64_MAX_SQRT#phase0
+    # altair
+    - PARTICIPATION_FLAG_WEIGHTS#altair
+    # bellatrix
+    - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix
+    # deneb
    - BLS_MODULUS#deneb
    - BYTES_PER_COMMITMENT#deneb
    - BYTES_PER_FIELD_ELEMENT#deneb
@@ -33,18 +47,9 @@ exceptions:
    - PRIMITIVE_ROOT_OF_UNITY#deneb
    - RANDOM_CHALLENGE_KZG_BATCH_DOMAIN#deneb
+    # fulu
    - RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN#fulu
-    # Not implemented
-    - BASIS_POINTS#phase0
-    - ENDIANNESS#phase0
-    - MAX_CONCURRENT_REQUESTS#phase0
-    - PARTICIPATION_FLAG_WEIGHTS#altair
-    - SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix
    - UINT256_MAX#fulu
-    - UINT64_MAX#phase0
-    - UINT64_MAX_SQRT#phase0
-
-    # Not implemented: gloas (future fork)
+    # gloas
    - BUILDER_PAYMENT_THRESHOLD_DENOMINATOR#gloas
    - BUILDER_PAYMENT_THRESHOLD_NUMERATOR#gloas
    - BUILDER_WITHDRAWAL_PREFIX#gloas
@@ -61,61 +66,62 @@ exceptions:
    - PTC_TIMELINESS_INDEX#gloas
 
  configs:
-    # Not implemented: gloas (future fork)
+    # gloas
    - AGGREGATE_DUE_BPS_GLOAS#gloas
    - ATTESTATION_DUE_BPS_GLOAS#gloas
    - CONTRIBUTION_DUE_BPS_GLOAS#gloas
    - GLOAS_FORK_EPOCH#gloas
    - GLOAS_FORK_VERSION#gloas
    - MAX_REQUEST_PAYLOADS#gloas
+    - MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
    - PAYLOAD_ATTESTATION_DUE_BPS#gloas
    - SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
-    - MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
 
  ssz_objects:
-    # Not implemented
+    # phase0
    - Eth1Block#phase0
-    - MatrixEntry#fulu
-
-    # Not implemented: capella
+    # capella
    - LightClientBootstrap#capella
    - LightClientFinalityUpdate#capella
    - LightClientOptimisticUpdate#capella
    - LightClientUpdate#capella
-    # Not implemented: gloas (future fork)
+    # fulu
+    - MatrixEntry#fulu
+    # gloas
    - BeaconBlockBody#gloas
    - BeaconState#gloas
+    - Builder#gloas
    - BuilderPendingPayment#gloas
    - BuilderPendingWithdrawal#gloas
    - DataColumnSidecar#gloas
-    - ExecutionPayloadEnvelope#gloas
    - ExecutionPayloadBid#gloas
+    - ExecutionPayloadEnvelope#gloas
    - ForkChoiceNode#gloas
    - IndexedPayloadAttestation#gloas
    - PayloadAttestation#gloas
    - PayloadAttestationData#gloas
    - PayloadAttestationMessage#gloas
-    - SignedExecutionPayloadEnvelope#gloas
-    - SignedExecutionPayloadBid#gloas
-    - Builder#gloas
    - ProposerPreferences#gloas
+    - SignedExecutionPayloadBid#gloas
+    - SignedExecutionPayloadEnvelope#gloas
    - SignedProposerPreferences#gloas
 
  dataclasses:
-    # Not implemented
-    - BlobParameters#fulu
-    - ExpectedWithdrawals#capella
-    - ExpectedWithdrawals#electra
+    # phase0
    - LatestMessage#phase0
-    - LightClientStore#altair
-    - OptimisticStore#bellatrix
    - Store#phase0
-    # Not implemented: capella
+    # altair
+    - LightClientStore#altair
+    # bellatrix
+    - OptimisticStore#bellatrix
+    # capella
+    - ExpectedWithdrawals#capella
    - LightClientStore#capella
-    # Not implemented: gloas (future fork)
+    # electra
+    - ExpectedWithdrawals#electra
+    # fulu
+    - BlobParameters#fulu
+    # gloas
    - ExpectedWithdrawals#gloas
    - LatestMessage#gloas
    - Store#gloas
@@ -140,7 +146,6 @@ exceptions:
    - g1_lincomb#deneb
    - hash_to_bls_field#deneb
    - is_power_of_two#deneb
-    - multi_exp#deneb
    - reverse_bits#deneb
    - validate_kzg_g1#deneb
    - verify_blob_kzg_proof#deneb
@@ -175,7 +180,12 @@ exceptions:
    - verify_cell_kzg_proof_batch#fulu
    - verify_cell_kzg_proof_batch_impl#fulu
 
-    # Not implemented: phase0
+    # phase0
+    - update_proposer_boost_root#phase0
+    - is_proposer_equivocation#phase0
+    - record_block_timeliness#phase0
+    - compute_proposer_score#phase0
+    - get_attestation_score#phase0
    - calculate_committee_fraction#phase0
    - compute_fork_version#phase0
    - compute_pulled_up_tip#phase0
@@ -221,8 +231,7 @@ exceptions:
    - validate_on_attestation#phase0
    - validate_target_epoch_against_current_time#phase0
    - xor#phase0
-
-    # Not implemented: altair
+    # altair
    - compute_merkle_proof#altair
    - compute_sync_committee_period_at_slot#altair
    - get_contribution_and_proof#altair
@@ -244,27 +253,29 @@ exceptions:
    - process_sync_committee_contributions#altair
    - set_or_append_list#altair
    - validate_light_client_update#altair
-
-    # Not implemented: bellatrix
+    # bellatrix
    - get_execution_payload#bellatrix
    - is_merge_transition_block#bellatrix
    - is_optimistic_candidate_block#bellatrix
    - latest_verified_ancestor#bellatrix
    - prepare_execution_payload#bellatrix
-
-    # Not implemented: capella
+    # capella
+    - apply_withdrawals#capella
+    - get_balance_after_withdrawals#capella
    - get_lc_execution_root#capella
+    - get_validators_sweep_withdrawals#capella
    - is_valid_light_client_header#capella
    - prepare_execution_payload#capella
    - process_epoch#capella
+    - update_next_withdrawal_index#capella
+    - update_next_withdrawal_validator_index#capella
    - upgrade_lc_bootstrap_to_capella#capella
    - upgrade_lc_finality_update_to_capella#capella
    - upgrade_lc_header_to_capella#capella
    - upgrade_lc_optimistic_update_to_capella#capella
    - upgrade_lc_store_to_capella#capella
    - upgrade_lc_update_to_capella#capella
-
-    # Not implemented: deneb
+    # deneb
    - get_lc_execution_root#deneb
    - is_valid_light_client_header#deneb
    - prepare_execution_payload#deneb
@@ -274,33 +285,34 @@ exceptions:
    - upgrade_lc_optimistic_update_to_deneb#deneb
    - upgrade_lc_store_to_deneb#deneb
    - upgrade_lc_update_to_deneb#deneb
-
-    # Not implemented: electra
+    # electra
    - compute_weak_subjectivity_period#electra
    - current_sync_committee_gindex_at_slot#electra
    - finalized_root_gindex_at_slot#electra
    - get_eth1_vote#electra
    - get_lc_execution_root#electra
+    - get_pending_partial_withdrawals#electra
+    - get_validators_sweep_withdrawals#electra
    - is_compounding_withdrawal_credential#electra
+    - is_eligible_for_partial_withdrawals#electra
    - is_within_weak_subjectivity_period#electra
    - next_sync_committee_gindex_at_slot#electra
    - normalize_merkle_branch#electra
    - prepare_execution_payload#electra
+    - update_pending_partial_withdrawals#electra
    - upgrade_lc_bootstrap_to_electra#electra
    - upgrade_lc_finality_update_to_electra#electra
    - upgrade_lc_header_to_electra#electra
    - upgrade_lc_optimistic_update_to_electra#electra
    - upgrade_lc_store_to_electra#electra
    - upgrade_lc_update_to_electra#electra
-
-    # Not implemented: fulu
+    # fulu
    - compute_matrix#fulu
    - get_blob_parameters#fulu
    - get_data_column_sidecars_from_block#fulu
    - get_data_column_sidecars_from_column_sidecar#fulu
    - recover_matrix#fulu
-
-    # Not implemented: gloas (future fork)
+    # gloas
    - compute_balance_weighted_acceptance#gloas
    - compute_balance_weighted_selection#gloas
    - compute_fork_version#gloas
@@ -368,49 +380,42 @@ exceptions:
    - verify_execution_payload_bid_signature#gloas
    - add_builder_to_registry#gloas
    - apply_deposit_for_builder#gloas
-    - apply_withdrawals#capella
    - apply_withdrawals#gloas
    - can_builder_cover_bid#gloas
-    - compute_proposer_score#phase0
    - convert_builder_index_to_validator_index#gloas
    - convert_validator_index_to_builder_index#gloas
    - get_attestation_score#gloas
    - get_attestation_score#phase0
    - get_balance_after_withdrawals#capella
-    - get_builder_from_deposit#gloas
    - get_builder_withdrawals#gloas
    - get_builders_sweep_withdrawals#gloas
    - get_index_for_new_builder#gloas
    - get_pending_balance_to_withdraw_for_builder#gloas
-    - get_pending_partial_withdrawals#electra
    - get_proposer_preferences_signature#gloas
    - get_upcoming_proposal_slots#gloas
-    - get_validators_sweep_withdrawals#capella
-    - get_validators_sweep_withdrawals#electra
    - initiate_builder_exit#gloas
    - is_active_builder#gloas
    - is_builder_index#gloas
+    - is_data_available#gloas
    - is_eligible_for_partial_withdrawals#electra
    - is_head_late#gloas
    - is_head_weak#gloas
    - is_parent_strong#gloas
-    - is_proposer_equivocation#phase0
    - is_valid_proposal_slot#gloas
+    - onboard_builders_from_pending_deposits#gloas
    - process_deposit_request#gloas
    - process_voluntary_exit#gloas
    - record_block_timeliness#gloas
    - record_block_timeliness#phase0
+    - verify_data_column_sidecar_kzg_proofs#gloas
    - should_apply_proposer_boost#gloas
    - update_builder_pending_withdrawals#gloas
    - update_next_withdrawal_builder_index#gloas
-    - update_next_withdrawal_index#capella
-    - update_next_withdrawal_validator_index#capella
    - update_payload_expected_withdrawals#gloas
-    - update_pending_partial_withdrawals#electra
    - update_proposer_boost_root#gloas
-    - update_proposer_boost_root#phase0
 
  presets:
+    # gloas
    - BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
    - BUILDER_REGISTRY_LIMIT#gloas
    - MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
```
.github/workflows/check-specrefs.yml (8 changed lines, vendored)

```diff
@@ -12,11 +12,11 @@ jobs:
      - name: Check version consistency
        run: |
          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
-          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
+          ETHSPECIFY_VERSION=$(grep '^version:' .ethspecify.yml | sed 's/version: //')
          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
            echo "Version mismatch between WORKSPACE and ethspecify"
            echo "  WORKSPACE: $WORKSPACE_VERSION"
-            echo "  specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
+            echo "  .ethspecify.yml: $ETHSPECIFY_VERSION"
            exit 1
          else
            echo "Versions match: $WORKSPACE_VERSION"
@@ -26,7 +26,7 @@ jobs:
        run: python3 -mpip install ethspecify
 
      - name: Update spec references
-        run: ethspecify process --path=specrefs
+        run: ethspecify
 
      - name: Check for differences
        run: |
@@ -40,4 +40,4 @@ jobs:
          fi
 
      - name: Check spec references
-        run: ethspecify check --path=specrefs
+        run: ethspecify check
```
WORKSPACE (10 changed lines)

```diff
@@ -273,16 +273,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.7.0-alpha.1"
+consensus_spec_version = "v1.7.0-alpha.2"
 
 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
 
 consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
-        "general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
-        "minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
-        "mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
+        "general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
+        "minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
+        "mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
    },
    version = consensus_spec_version,
 )
@@ -298,7 +298,7 @@ filegroup(
    visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
+    integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
```
api/fallback/BUILD.bazel (new file, 19 lines)

```diff
@@ -0,0 +1,19 @@
+load("@prysm//tools/go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "fallback.go",
+        "log.go",
+    ],
+    importpath = "github.com/OffchainLabs/prysm/v7/api/fallback",
+    visibility = ["//visibility:public"],
+    deps = ["@com_github_sirupsen_logrus//:go_default_library"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["fallback_test.go"],
+    embed = [":go_default_library"],
+    deps = ["//testing/assert:go_default_library"],
+)
```
api/fallback/fallback.go (new file, 66 lines)

```diff
@@ -0,0 +1,66 @@
+package fallback
+
+import (
+    "context"
+
+    "github.com/sirupsen/logrus"
+)
+
+// HostProvider is the subset of connection-provider methods that EnsureReady
+// needs. Both grpc.GrpcConnectionProvider and rest.RestConnectionProvider
+// satisfy this interface.
+type HostProvider interface {
+    Hosts() []string
+    CurrentHost() string
+    SwitchHost(index int) error
+}
+
+// ReadyChecker can report whether the current endpoint is ready.
+// iface.NodeClient satisfies this implicitly.
+type ReadyChecker interface {
+    IsReady(ctx context.Context) bool
+}
+
+// EnsureReady iterates through the configured hosts and returns true as soon as
+// one responds as ready. It starts from the provider's current host and wraps
+// around using modular arithmetic, performing failover when a host is not ready.
+func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecker) bool {
+    hosts := provider.Hosts()
+    numHosts := len(hosts)
+    startingHost := provider.CurrentHost()
+    var attemptedHosts []string
+
+    // Find current index
+    currentIdx := 0
+    for i, h := range hosts {
+        if h == startingHost {
+            currentIdx = i
+            break
+        }
+    }
+
+    for i := range numHosts {
+        if checker.IsReady(ctx) {
+            if len(attemptedHosts) > 0 {
+                log.WithFields(logrus.Fields{
+                    "previous": startingHost,
+                    "current":  provider.CurrentHost(),
+                    "tried":    attemptedHosts,
+                }).Info("Switched to responsive beacon node")
+            }
+            return true
+        }
+        attemptedHosts = append(attemptedHosts, provider.CurrentHost())
+
+        // Try next host if not the last iteration
+        if i < numHosts-1 {
+            nextIdx := (currentIdx + i + 1) % numHosts
+            if err := provider.SwitchHost(nextIdx); err != nil {
+                log.WithError(err).Error("Failed to switch host")
+            }
+        }
+    }
+
+    log.WithField("tried", attemptedHosts).Warn("No responsive beacon node found")
+    return false
+}
```
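The wrap-around arithmetic means a provider currently on host1 of three probes host1, then host2, then host0. A minimal sketch of wiring `EnsureReady` against a trivial in-memory provider; the `staticProvider` type, the readiness rule, and the host URLs are illustrative, not part of this change:

```go
package main

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/api/fallback"
)

// staticProvider is a hypothetical HostProvider backed by a plain slice.
type staticProvider struct {
	hosts []string
	idx   int
}

func (p *staticProvider) Hosts() []string        { return p.hosts }
func (p *staticProvider) CurrentHost() string    { return p.hosts[p.idx] }
func (p *staticProvider) SwitchHost(i int) error { p.idx = i; return nil }

// onlyLastReady is a hypothetical ReadyChecker: only host2 reports ready.
type onlyLastReady struct{ p *staticProvider }

func (c onlyLastReady) IsReady(_ context.Context) bool {
	return c.p.CurrentHost() == "http://host2:3500"
}

func main() {
	p := &staticProvider{
		hosts: []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
		idx:   1, // start on host1; EnsureReady probes host1, host2, then wraps to host0
	}
	ok := fallback.EnsureReady(context.Background(), p, onlyLastReady{p})
	fmt.Println(ok, p.CurrentHost()) // true http://host2:3500
}
```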
api/fallback/fallback_test.go (new file, 94 lines)

```diff
@@ -0,0 +1,94 @@
+package fallback
+
+import (
+    "context"
+    "testing"
+
+    "github.com/OffchainLabs/prysm/v7/testing/assert"
+)
+
+// mockHostProvider is a minimal HostProvider for unit tests.
+type mockHostProvider struct {
+    hosts     []string
+    hostIndex int
+}
+
+func (m *mockHostProvider) Hosts() []string { return m.hosts }
+func (m *mockHostProvider) CurrentHost() string {
+    return m.hosts[m.hostIndex%len(m.hosts)]
+}
+func (m *mockHostProvider) SwitchHost(index int) error { m.hostIndex = index; return nil }
+
+// mockReadyChecker records per-call IsReady results in sequence.
+type mockReadyChecker struct {
+    results []bool
+    idx     int
+}
+
+func (m *mockReadyChecker) IsReady(_ context.Context) bool {
+    if m.idx >= len(m.results) {
+        return false
+    }
+    r := m.results[m.idx]
+    m.idx++
+    return r
+}
+
+func TestEnsureReady_SingleHostReady(t *testing.T) {
+    provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
+    checker := &mockReadyChecker{results: []bool{true}}
+    assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
+    assert.Equal(t, 0, provider.hostIndex)
+}
+
+func TestEnsureReady_SingleHostNotReady(t *testing.T) {
+    provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
+    checker := &mockReadyChecker{results: []bool{false}}
+    assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
+}
+
+func TestEnsureReady_SingleHostError(t *testing.T) {
+    provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
+    checker := &mockReadyChecker{results: []bool{false}}
+    assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
+}
+
+func TestEnsureReady_MultipleHostsFirstReady(t *testing.T) {
+    provider := &mockHostProvider{
+        hosts:     []string{"http://host1:3500", "http://host2:3500"},
+        hostIndex: 0,
+    }
+    checker := &mockReadyChecker{results: []bool{true}}
+    assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
+    assert.Equal(t, 0, provider.hostIndex)
+}
+
+func TestEnsureReady_MultipleHostsFailoverToSecond(t *testing.T) {
+    provider := &mockHostProvider{
+        hosts:     []string{"http://host1:3500", "http://host2:3500"},
+        hostIndex: 0,
+    }
+    checker := &mockReadyChecker{results: []bool{false, true}}
+    assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
+    assert.Equal(t, 1, provider.hostIndex)
+}
+
+func TestEnsureReady_MultipleHostsNoneReady(t *testing.T) {
+    provider := &mockHostProvider{
+        hosts:     []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"},
+        hostIndex: 0,
+    }
+    checker := &mockReadyChecker{results: []bool{false, false, false}}
+    assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
+}
+
+func TestEnsureReady_WrapAroundFromNonZeroIndex(t *testing.T) {
+    provider := &mockHostProvider{
+        hosts:     []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
+        hostIndex: 1,
+    }
+    // host1 (start) fails, host2 fails, host0 succeeds
+    checker := &mockReadyChecker{results: []bool{false, false, true}}
+    assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
+    assert.Equal(t, 0, provider.hostIndex)
+}
```
api/fallback/log.go

```diff
@@ -1,9 +1,9 @@
 // Code generated by hack/gen-logs.sh; DO NOT EDIT.
 // This file is created and regenerated automatically. Anything added here might get removed.
-package blocks
+package fallback
 
 import "github.com/sirupsen/logrus"
 
 // The prefix for logs from this package will be the text after the last slash in the package path.
 // If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
-var log = logrus.WithField("package", "consensus-types/blocks")
+var log = logrus.WithField("package", "api/fallback")
```
```diff
@@ -25,6 +25,11 @@ type GrpcConnectionProvider interface {
    // SwitchHost switches to the endpoint at the given index.
    // The new connection is created lazily on next CurrentConn() call.
    SwitchHost(index int) error
+    // ConnectionCounter returns a monotonically increasing counter that increments
+    // each time SwitchHost changes the active endpoint. This allows consumers to
+    // detect connection changes even when the host string returns to a previous value
+    // (e.g., host0 → host1 → host0).
+    ConnectionCounter() uint64
    // Close closes the current connection.
    Close()
 }
```
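The counter exists because comparing host strings cannot distinguish "never switched" from "switched away and back". A minimal sketch of a consumer that refreshes per-connection state only when the counter moves; the `service` wrapper and fake provider are illustrative, not part of this change:

```go
package main

import "fmt"

// connProvider is the narrowed view of GrpcConnectionProvider this sketch needs.
type connProvider interface {
	CurrentHost() string
	ConnectionCounter() uint64
}

// service is a hypothetical long-lived consumer of a provider.
type service struct {
	provider    connProvider
	lastCounter uint64
}

// refreshIfSwitched rebuilds per-connection state only when a switch actually
// happened; comparing CurrentHost() alone would miss host0 -> host1 -> host0.
func (s *service) refreshIfSwitched() {
	if c := s.provider.ConnectionCounter(); c != s.lastCounter {
		s.lastCounter = c
		fmt.Println("connection changed, now on", s.provider.CurrentHost())
		// re-create any stubs bound to the previous connection here
	}
}

// fakeProvider simulates one failover for demonstration purposes.
type fakeProvider struct {
	host    string
	counter uint64
}

func (f *fakeProvider) CurrentHost() string       { return f.host }
func (f *fakeProvider) ConnectionCounter() uint64 { return f.counter }

func main() {
	p := &fakeProvider{host: "http://host0:3500"}
	s := &service{provider: p}
	s.refreshIfSwitched() // no output: counter has not moved
	p.host, p.counter = "http://host1:3500", p.counter+1
	s.refreshIfSwitched() // connection changed, now on http://host1:3500
}
```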
```diff
@@ -38,6 +43,7 @@ type grpcConnectionProvider struct {
    // Current connection state (protected by mutex)
    currentIndex uint64
    conn         *grpc.ClientConn
+    connCounter  uint64
 
    mu     sync.Mutex
    closed bool
@@ -138,6 +144,7 @@ func (p *grpcConnectionProvider) SwitchHost(index int) error {
 
    p.conn = nil // Clear immediately - new connection created lazily
    p.currentIndex = uint64(index)
+    p.connCounter++
 
    // Close old connection asynchronously to avoid blocking the caller
    if oldConn != nil {
@@ -155,6 +162,12 @@ func (p *grpcConnectionProvider) SwitchHost(index int) error {
    return nil
 }
 
+func (p *grpcConnectionProvider) ConnectionCounter() uint64 {
+    p.mu.Lock()
+    defer p.mu.Unlock()
+    return p.connCounter
+}
+
 func (p *grpcConnectionProvider) Close() {
    p.mu.Lock()
    defer p.mu.Unlock()
```
```diff
@@ -4,17 +4,24 @@ import "google.golang.org/grpc"
 
 // MockGrpcProvider implements GrpcConnectionProvider for testing.
 type MockGrpcProvider struct {
    MockConn  *grpc.ClientConn
    MockHosts []string
+    CurrentIndex int
+    ConnCounter  uint64
 }
 
 func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
 func (m *MockGrpcProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
-        return m.MockHosts[0]
+        return m.MockHosts[m.CurrentIndex]
    }
    return ""
 }
 func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
-func (m *MockGrpcProvider) SwitchHost(int) error { return nil }
-func (m *MockGrpcProvider) Close() {}
+func (m *MockGrpcProvider) SwitchHost(idx int) error {
+    m.CurrentIndex = idx
+    m.ConnCounter++
+    return nil
+}
+func (m *MockGrpcProvider) ConnectionCounter() uint64 { return m.ConnCounter }
+func (m *MockGrpcProvider) Close() {}
```
```diff
@@ -9,13 +9,13 @@ import (
 // MockRestProvider implements RestConnectionProvider for testing.
 type MockRestProvider struct {
    MockClient  *http.Client
-    MockHandler RestHandler
+    MockHandler Handler
    MockHosts   []string
    HostIndex   int
 }
 
 func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
-func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
+func (m *MockRestProvider) Handler() Handler { return m.MockHandler }
 func (m *MockRestProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[m.HostIndex%len(m.MockHosts)]
@@ -25,25 +25,22 @@ func (m *MockRestProvider) CurrentHost() string {
 func (m *MockRestProvider) Hosts() []string { return m.MockHosts }
 func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }
 
-// MockRestHandler implements RestHandler for testing.
-type MockRestHandler struct {
+// MockHandler implements Handler for testing.
+type MockHandler struct {
    MockHost   string
-    MockClient *http.Client
 }
 
-func (m *MockRestHandler) Get(_ context.Context, _ string, _ any) error { return nil }
-func (m *MockRestHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
+func (m *MockHandler) Get(_ context.Context, _ string, _ any) error { return nil }
+func (m *MockHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
    return http.StatusOK, nil
 }
-func (m *MockRestHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
+func (m *MockHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
    return nil, nil, nil
 }
-func (m *MockRestHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
+func (m *MockHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
    return nil
 }
-func (m *MockRestHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
+func (m *MockHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
    return nil, nil, nil
 }
-func (m *MockRestHandler) HttpClient() *http.Client { return m.MockClient }
-func (m *MockRestHandler) Host() string { return m.MockHost }
-func (m *MockRestHandler) SwitchHost(host string) { m.MockHost = host }
+func (m *MockHandler) Host() string { return m.MockHost }
```
```diff
@@ -17,8 +17,8 @@ import (
 type RestConnectionProvider interface {
    // HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
    HttpClient() *http.Client
-    // RestHandler returns the REST handler for making API requests.
-    RestHandler() RestHandler
+    // Handler returns the REST handler for making API requests.
+    Handler() Handler
    // CurrentHost returns the current REST API endpoint URL.
    CurrentHost() string
    // Hosts returns all configured REST API endpoint URLs.
@@ -54,7 +54,7 @@ func WithTracing() RestConnectionProviderOption {
 type restConnectionProvider struct {
    endpoints    []string
    httpClient   *http.Client
-    restHandler  RestHandler
+    restHandler  *handler
    currentIndex atomic.Uint64
    timeout      time.Duration
    headers      map[string][]string
@@ -96,7 +96,7 @@ func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOp
    }
 
    // Create the REST handler with the HTTP client and initial host
-    p.restHandler = newRestHandler(*p.httpClient, endpoints[0])
+    p.restHandler = newHandler(*p.httpClient, endpoints[0])
 
    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
@@ -124,7 +124,7 @@ func (p *restConnectionProvider) HttpClient() *http.Client {
    return p.httpClient
 }
 
-func (p *restConnectionProvider) RestHandler() RestHandler {
+func (p *restConnectionProvider) Handler() Handler {
    return p.restHandler
 }
```
```diff
@@ -21,32 +21,35 @@ import (
 
 type reqOption func(*http.Request)
 
-// RestHandler defines the interface for making REST API requests.
-type RestHandler interface {
+// Handler defines the interface for making REST API requests.
+type Handler interface {
    Get(ctx context.Context, endpoint string, resp any) error
    GetStatusCode(ctx context.Context, endpoint string) (int, error)
    GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error)
    Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error
    PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error)
-    HttpClient() *http.Client
    Host() string
-    SwitchHost(host string)
 }
 
-type restHandler struct {
+type handler struct {
    client       http.Client
    host         string
    reqOverrides []reqOption
 }
 
-// newRestHandler returns a RestHandler (internal use)
-func newRestHandler(client http.Client, host string) RestHandler {
-    return NewRestHandler(client, host)
+// newHandler returns a *handler for internal use within the rest package.
+func newHandler(client http.Client, host string) *handler {
+    rh := &handler{
+        client: client,
+        host:   host,
+    }
+    rh.appendAcceptOverride()
+    return rh
 }
 
-// NewRestHandler returns a RestHandler
-func NewRestHandler(client http.Client, host string) RestHandler {
-    rh := &restHandler{
+// NewHandler returns a Handler
+func NewHandler(client http.Client, host string) Handler {
+    rh := &handler{
        client: client,
        host:   host,
    }
```
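`NewHandler` stays exported for callers outside the package, while `newHandler` now returns the concrete type so the provider can keep calling `SwitchHost`, which was dropped from the interface. A minimal construction sketch; the import path is an assumption, not confirmed by this diff:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	// Assumed import path for the rest package touched above; adjust to
	// wherever the package actually lives in the tree.
	"github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
	// NewHandler returns the Handler interface, so only interface methods
	// (Get, GetStatusCode, GetSSZ, Post, PostSSZ, Host) are available here.
	h := rest.NewHandler(http.Client{Timeout: 10 * time.Second}, "http://localhost:3500")
	fmt.Println(h.Host()) // http://localhost:3500
}
```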
```diff
@@ -57,7 +60,7 @@ func NewRestHandler(client http.Client, host string) RestHandler {
 // appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
 // This is specified as an env var because it is a niche option that prysm may use for performance testing or debugging
 // bug which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
-func (c *restHandler) appendAcceptOverride() {
+func (c *handler) appendAcceptOverride() {
    if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
        c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
            req.Header.Set("Accept", accept)
@@ -66,18 +69,18 @@ func (c *restHandler) appendAcceptOverride() {
 }
 
 // HttpClient returns the underlying HTTP client of the handler
-func (c *restHandler) HttpClient() *http.Client {
+func (c *handler) HttpClient() *http.Client {
    return &c.client
 }
 
 // Host returns the underlying HTTP host
-func (c *restHandler) Host() string {
+func (c *handler) Host() string {
    return c.host
 }
 
 // Get sends a GET request and decodes the response body as a JSON object into the passed in object.
 // If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
-func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error {
+func (c *handler) Get(ctx context.Context, endpoint string, resp any) error {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -100,7 +103,7 @@ func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error
 // GetStatusCode sends a GET request and returns only the HTTP status code.
 // This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
 // (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
-func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
+func (c *handler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
```
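A readiness probe follows directly from that status-code contract. A standalone sketch that treats only 200 as ready (206 and 503 both count as not ready), with a narrowed local interface so it compiles without the rest package; the `fakeHandler` stub is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// statusGetter is the one method of the Handler interface this probe needs.
type statusGetter interface {
	GetStatusCode(ctx context.Context, endpoint string) (int, error)
}

// isNodeReady maps the /eth/v1/node/health status codes onto a boolean:
// 200 = ready; 206 (syncing) and 503 (unavailable) are treated as not ready.
func isNodeReady(ctx context.Context, h statusGetter) bool {
	code, err := h.GetStatusCode(ctx, "/eth/v1/node/health")
	return err == nil && code == http.StatusOK
}

// fakeHandler always reports 206, i.e. a node that is still syncing.
type fakeHandler struct{}

func (fakeHandler) GetStatusCode(context.Context, string) (int, error) {
	return http.StatusPartialContent, nil
}

func main() {
	fmt.Println(isNodeReady(context.Background(), fakeHandler{})) // false
}
```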
```diff
@@ -119,7 +122,7 @@ func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int,
    return httpResp.StatusCode, nil
 }
 
-func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
+func (c *handler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -174,7 +177,7 @@ func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http
 
 // Post sends a POST request and decodes the response body as a JSON object into the passed in object.
 // If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
-func (c *restHandler) Post(
+func (c *handler) Post(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -210,7 +213,7 @@ func (c *restHandler) Post(
 }
 
 // PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
-func (c *restHandler) PostSSZ(
+func (c *handler) PostSSZ(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -311,6 +314,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
    return nil
 }
 
-func (c *restHandler) SwitchHost(host string) {
+func (c *handler) SwitchHost(host string) {
    c.host = host
 }
```
```diff
@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
 // ----------------------------------------------------------------------------
 
 type ExecutionPayloadBid struct {
    ParentBlockHash  string `json:"parent_block_hash"`
    ParentBlockRoot  string `json:"parent_block_root"`
    BlockHash        string `json:"block_hash"`
    PrevRandao       string `json:"prev_randao"`
    FeeRecipient     string `json:"fee_recipient"`
    GasLimit         string `json:"gas_limit"`
    BuilderIndex     string `json:"builder_index"`
    Slot             string `json:"slot"`
    Value            string `json:"value"`
    ExecutionPayment string `json:"execution_payment"`
-    BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
+    BlobKzgCommitments []string `json:"blob_kzg_commitments"`
 }
 
 type SignedExecutionPayloadBid struct {
@@ -2939,18 +2939,22 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
 }
 
 func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
+    blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
+    for i := range b.BlobKzgCommitments {
+        blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
+    }
    return &ExecutionPayloadBid{
        ParentBlockHash:  hexutil.Encode(b.ParentBlockHash),
        ParentBlockRoot:  hexutil.Encode(b.ParentBlockRoot),
        BlockHash:        hexutil.Encode(b.BlockHash),
        PrevRandao:       hexutil.Encode(b.PrevRandao),
        FeeRecipient:     hexutil.Encode(b.FeeRecipient),
        GasLimit:         fmt.Sprintf("%d", b.GasLimit),
        BuilderIndex:     fmt.Sprintf("%d", b.BuilderIndex),
        Slot:             fmt.Sprintf("%d", b.Slot),
        Value:            fmt.Sprintf("%d", b.Value),
        ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
-        BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
+        BlobKzgCommitments: blobKzgCommitments,
    }
 }
@@ -3187,22 +3191,30 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
    if err != nil {
        return nil, server.NewDecodeError(err, "ExecutionPayment")
    }
-    blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
+    err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
    if err != nil {
-        return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
+        return nil, server.NewDecodeError(err, "BlobKzgCommitments")
+    }
+    blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
+    for i, commitment := range b.BlobKzgCommitments {
+        kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
+        if err != nil {
+            return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
+        }
+        blobKzgCommitments[i] = kzg
    }
    return &eth.ExecutionPayloadBid{
        ParentBlockHash:  parentBlockHash,
        ParentBlockRoot:  parentBlockRoot,
        BlockHash:        blockHash,
        PrevRandao:       prevRandao,
        FeeRecipient:     feeRecipient,
        GasLimit:         gasLimit,
        BuilderIndex:     primitives.BuilderIndex(builderIndex),
        Slot:             primitives.Slot(slot),
        Value:            primitives.Gwei(value),
        ExecutionPayment: primitives.Gwei(executionPayment),
-        BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
+        BlobKzgCommitments:     blobKzgCommitments,
    }, nil
 }
```
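The decode side reuses `fieldparams.BLSPubkeyLength` because a KZG commitment is 48 bytes, the same width as a BLS public key. A standalone sketch of the same hex-with-length validation using only the standard library; the helper name and constant are written out directly rather than taken from Prysm:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

const kzgCommitmentLen = 48 // bytes; same width as a BLS public key

// decodeCommitment mirrors the per-element check done in ToConsensus above:
// strip the 0x prefix, hex-decode, and enforce the exact byte length.
func decodeCommitment(s string) ([]byte, error) {
	b, err := hex.DecodeString(strings.TrimPrefix(s, "0x"))
	if err != nil {
		return nil, err
	}
	if len(b) != kzgCommitmentLen {
		return nil, fmt.Errorf("wrong commitment length: got %d, want %d", len(b), kzgCommitmentLen)
	}
	return b, nil
}

func main() {
	c, err := decodeCommitment("0x" + strings.Repeat("ab", kzgCommitmentLen))
	fmt.Println(len(c), err) // 48 <nil>
}
```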
```diff
@@ -63,6 +63,19 @@ type PeerCount struct {
    Connected     string `json:"connected"`
    Disconnecting string `json:"disconnecting"`
 }
+type GetVersionV2Response struct {
+    Data *VersionV2 `json:"data"`
+}
+type VersionV2 struct {
+    BeaconNode      *ClientVersionV1   `json:"beacon_node"`
+    ExecutionClient []*ClientVersionV1 `json:"execution_client"`
+}
+type ClientVersionV1 struct {
+    Code    string `json:"code"`
+    Name    string `json:"name"`
+    Version string `json:"version"`
+    Commit  string `json:"commit"`
+}
 
 type GetVersionResponse struct {
    Data *Version `json:"data"`
```
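For reference, marshaling the new response type produces a shape like the following; the client names, versions, and commits in this sketch are made-up sample values, not real client output:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Types as added above.
type ClientVersionV1 struct {
	Code    string `json:"code"`
	Name    string `json:"name"`
	Version string `json:"version"`
	Commit  string `json:"commit"`
}

type VersionV2 struct {
	BeaconNode      *ClientVersionV1   `json:"beacon_node"`
	ExecutionClient []*ClientVersionV1 `json:"execution_client"`
}

type GetVersionV2Response struct {
	Data *VersionV2 `json:"data"`
}

func main() {
	resp := GetVersionV2Response{Data: &VersionV2{
		BeaconNode: &ClientVersionV1{Code: "PM", Name: "Prysm", Version: "v7.0.0", Commit: "3b0f34a7"},
		ExecutionClient: []*ClientVersionV1{
			{Code: "GE", Name: "Geth", Version: "v1.14.0", Commit: "abcdef01"},
		},
	}}
	out, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
}
```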
```diff
@@ -85,6 +85,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
+        "//io/logs:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
```
```diff
@@ -110,7 +110,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
    ckzgCells := make([]ckzg4844.Cell, len(cells))
 
    for i := range cells {
-        ckzgCells[i] = ckzg4844.Cell(cells[i])
+        copy(ckzgCells[i][:], cells[i][:])
    }
    return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
 }
```
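Switching from an array conversion to `copy` makes the loop robust to the two cell types diverging: a direct conversion only compiles while both arrays have identical underlying types, whereas a byte copy works for any two equal-length byte arrays. A standalone illustration with stand-in types (real cells are much larger than four bytes):

```go
package main

import "fmt"

type cellA [4]byte // stand-ins for the two Cell types
type cellB [4]byte

func main() {
	a := cellA{1, 2, 3, 4}

	// Direct conversion: only legal because cellA and cellB happen to have
	// identical underlying types. If the definitions diverge, this line
	// stops compiling.
	b := cellB(a)

	// Byte copy: works for any two byte-array types of the same length,
	// even when a direct conversion would be rejected.
	var c cellB
	copy(c[:], a[:])

	fmt.Println(b == c) // true
}
```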
@@ -10,6 +10,7 @@ import (
|
|||||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/io/logs"
|
||||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||||
@@ -87,36 +88,45 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
 func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
 	startTime, err := slots.StartTime(genesis, block.Slot())
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to get slot start time")
 	}
-	level := log.Logger.GetLevel()
+	parentRoot := block.ParentRoot()
+	blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
+	finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
+	sinceSlotStartTime := prysmTime.Now().Sub(startTime)
+
+	lessFields := logrus.Fields{
+		"slot":               block.Slot(),
+		"block":              blkRoot,
+		"finalizedEpoch":     finalized.Epoch,
+		"finalizedRoot":      finalizedRoot,
+		"epoch":              slots.ToEpoch(block.Slot()),
+		"sinceSlotStartTime": sinceSlotStartTime,
+	}
+	moreFields := logrus.Fields{
+		"slot":                       block.Slot(),
+		"slotInEpoch":                block.Slot() % params.BeaconConfig().SlotsPerEpoch,
+		"block":                      blkRoot,
+		"epoch":                      slots.ToEpoch(block.Slot()),
+		"justifiedEpoch":             justified.Epoch,
+		"justifiedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
+		"finalizedEpoch":             finalized.Epoch,
+		"finalizedRoot":              finalizedRoot,
+		"parentRoot":                 fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
+		"version":                    version.String(block.Version()),
+		"sinceSlotStartTime":         sinceSlotStartTime,
+		"chainServiceProcessedTime":  prysmTime.Now().Sub(receivedTime) - daWaitedTime,
+		"dataAvailabilityWaitedTime": daWaitedTime,
+	}
+
+	level := logs.PackageVerbosity("beacon-chain/blockchain")
 	if level >= logrus.DebugLevel {
-		parentRoot := block.ParentRoot()
-		lf := logrus.Fields{
-			"slot":                       block.Slot(),
-			"slotInEpoch":                block.Slot() % params.BeaconConfig().SlotsPerEpoch,
-			"block":                      fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
-			"epoch":                      slots.ToEpoch(block.Slot()),
-			"justifiedEpoch":             justified.Epoch,
-			"justifiedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
-			"finalizedEpoch":             finalized.Epoch,
-			"finalizedRoot":              fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
-			"parentRoot":                 fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
-			"version":                    version.String(block.Version()),
-			"sinceSlotStartTime":         prysmTime.Now().Sub(startTime),
-			"chainServiceProcessedTime":  prysmTime.Now().Sub(receivedTime) - daWaitedTime,
-			"dataAvailabilityWaitedTime": daWaitedTime,
-		}
-		log.WithFields(lf).Debug("Synced new block")
-	} else {
-		log.WithFields(logrus.Fields{
-			"slot":           block.Slot(),
-			"block":          fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
-			"finalizedEpoch": finalized.Epoch,
-			"finalizedRoot":  fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
-			"epoch":          slots.ToEpoch(block.Slot()),
-		}).Info("Synced new block")
+		log.WithFields(moreFields).Info("Synced new block")
+		return nil
 	}
+
+	log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
+	log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
 	return nil
 }
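The rewrite computes both field sets up front and emits the same event twice, once terse for the user-facing stream and once verbose for an ephemeral consumer, instead of branching on log level alone. A self-contained sketch of that pattern with plain logrus (the "target" field name and its values are stand-ins for the constants in Prysm's logs package):

    package main

    import "github.com/sirupsen/logrus"

    func main() {
        lessFields := logrus.Fields{"slot": 123, "epoch": 3}
        moreFields := logrus.Fields{"slot": 123, "epoch": 3, "parentRoot": "0xabcd..."}

        // Same event, two audiences: downstream consumers filter on the
        // "target" field ("user" and "ephemeral" are assumed names).
        logrus.WithFields(lessFields).WithField("target", "user").Info("Synced new block")
        logrus.WithFields(moreFields).WithField("target", "ephemeral").Info("Synced new block")
    }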
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
 	return nil
 }
 
-func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
+func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
 	mb.broadcastCalled = true
 	return nil
 }
@@ -17,27 +17,56 @@ import (
 )
 
 // ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
 //
-// signed_bid = block.body.signed_execution_payload_bid
-// bid = signed_bid.message
-// builder_index = bid.builder_index
-// amount = bid.value
-// if builder_index == BUILDER_INDEX_SELF_BUILD:
-//     assert amount == 0
-//     assert signed_bid.signature == G2_POINT_AT_INFINITY
-// else:
-//     assert is_active_builder(state, builder_index)
-//     assert can_builder_cover_bid(state, builder_index, amount)
-//     assert verify_execution_payload_bid_signature(state, signed_bid)
-// assert bid.slot == block.slot
-// assert bid.parent_block_hash == state.latest_block_hash
-// assert bid.parent_block_root == block.parent_root
-// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
-// if amount > 0:
-//     state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
-// state.latest_execution_payload_bid = bid
+// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
+// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
+//     signed_bid = block.body.signed_execution_payload_bid
+//     bid = signed_bid.message
+//     builder_index = bid.builder_index
+//     amount = bid.value
+//
+//     # For self-builds, amount must be zero regardless of withdrawal credential prefix
+//     if builder_index == BUILDER_INDEX_SELF_BUILD:
+//         assert amount == 0
+//         assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
+//     else:
+//         # Verify that the builder is active
+//         assert is_active_builder(state, builder_index)
+//         # Verify that the builder has funds to cover the bid
+//         assert can_builder_cover_bid(state, builder_index, amount)
+//         # Verify that the bid signature is valid
+//         assert verify_execution_payload_bid_signature(state, signed_bid)
+//
+//     # Verify commitments are under limit
+//     assert (
+//         len(bid.blob_kzg_commitments)
+//         <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
+//     )
+//
+//     # Verify that the bid is for the current slot
+//     assert bid.slot == block.slot
+//     # Verify that the bid is for the right parent block
+//     assert bid.parent_block_hash == state.latest_block_hash
+//     assert bid.parent_block_root == block.parent_root
+//     assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
+//
+//     # Record the pending payment if there is some payment
+//     if amount > 0:
+//         pending_payment = BuilderPendingPayment(
+//             weight=0,
+//             withdrawal=BuilderPendingWithdrawal(
+//                 fee_recipient=bid.fee_recipient,
+//                 amount=amount,
+//                 builder_index=builder_index,
+//             ),
+//         )
+//         state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
+//             pending_payment
+//         )
+//
+//     # Cache the signed execution payload bid
+//     state.latest_execution_payload_bid = bid
+// </spec>
 func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
 	signedBid, err := block.Body().SignedExecutionPayloadBid()
 	if err != nil {
@@ -86,6 +115,12 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
 		}
 	}
 
+	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
+	commitmentCount := bid.BlobKzgCommitmentCount()
+	if commitmentCount > uint64(maxBlobsPerBlock) {
+		return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
+	}
+
 	if err := validateBidConsistency(st, bid, block); err != nil {
 		return errors.Wrap(err, "bid consistency validation failed")
 	}
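The added guard is a plain bounds check against the per-epoch blob parameter. A minimal sketch isolating it (maxPerBlock stands in for params.BeaconConfig().MaxBlobsPerBlockAtEpoch(epoch)):

    package main

    import "fmt"

    // checkCommitmentCount mirrors the guard added above: reject bids
    // carrying more blob KZG commitments than the per-epoch maximum.
    func checkCommitmentCount(commitments, maxPerBlock uint64) error {
        if commitments > maxPerBlock {
            return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitments, maxPerBlock)
        }
        return nil
    }

    func main() {
        fmt.Println(checkCommitmentCount(6, 6)) // <nil>
        fmt.Println(checkCommitmentCount(7, 6)) // bid has 7 blob KZG commitments over max 6
    }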
@@ -184,6 +184,28 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
 	return out
 }
 
+func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
+	max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
+	if count > max {
+		count = max
+	}
+	commitments := make([][]byte, count)
+	for i := range commitments {
+		commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
+	}
+	return commitments
+}
+
+func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
+	max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
+	count := max + 1
+	commitments := make([][]byte, count)
+	for i := range commitments {
+		commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
+	}
+	return commitments
+}
+
 func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
 	slot := primitives.Slot(12)
 	proposerIdx := primitives.ValidatorIndex(0)
@@ -194,17 +216,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
 	state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
 
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  latestHash[:],
 		ParentBlockRoot:  bytes.Repeat([]byte{0xCC}, 32),
 		BlockHash:        bytes.Repeat([]byte{0xDD}, 32),
 		PrevRandao:       randao[:],
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            0,
 		ExecutionPayment: 0,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0xFF}, 20),
 	}
 	signed := &ethpb.SignedExecutionPayloadBid{
 		Message: bid,
|
|||||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
|
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
|
||||||
|
|
||||||
bid := ðpb.ExecutionPayloadBid{
|
bid := ðpb.ExecutionPayloadBid{
|
||||||
ParentBlockHash: latestHash[:],
|
ParentBlockHash: latestHash[:],
|
||||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||||
PrevRandao: randao[:],
|
PrevRandao: randao[:],
|
||||||
BuilderIndex: builderIdx,
|
BuilderIndex: builderIdx,
|
||||||
Slot: slot,
|
Slot: slot,
|
||||||
Value: 10,
|
Value: 10,
|
||||||
ExecutionPayment: 0,
|
ExecutionPayment: 0,
|
||||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
|
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||||
}
|
}
|
||||||
signed := ðpb.SignedExecutionPayloadBid{
|
signed := ðpb.SignedExecutionPayloadBid{
|
||||||
Message: bid,
|
Message: bid,
|
||||||
@@ -280,17 +302,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
 	state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
 
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  latestHash[:],
 		ParentBlockRoot:  bytes.Repeat([]byte{0xCC}, 32),
 		BlockHash:        bytes.Repeat([]byte{0xDD}, 32),
 		PrevRandao:       randao[:],
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            500_000,
 		ExecutionPayment: 1,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0xFF}, 20),
 	}
 
 	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -341,17 +363,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
 	state = stateIface.(*state_native.BeaconState)
 
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  latestHash[:],
 		ParentBlockRoot:  bytes.Repeat([]byte{0x03}, 32),
 		BlockHash:        bytes.Repeat([]byte{0x04}, 32),
 		PrevRandao:       randao[:],
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            10,
 		ExecutionPayment: 0,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0x06}, 20),
 	}
 	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
 	sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -394,17 +416,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
 	state = stateIface.(*state_native.BeaconState)
 
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  latestHash[:],
 		ParentBlockRoot:  bytes.Repeat([]byte{0xCC}, 32),
 		BlockHash:        bytes.Repeat([]byte{0xDD}, 32),
 		PrevRandao:       randao[:],
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            25,
 		ExecutionPayment: 0,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0xFF}, 20),
 	}
 	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
 	sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -436,17 +458,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
 	state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
 
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  latestHash[:],
 		ParentBlockRoot:  bytes.Repeat([]byte{0xCC}, 32),
 		BlockHash:        bytes.Repeat([]byte{0xDD}, 32),
 		PrevRandao:       randao[:],
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            10,
 		ExecutionPayment: 0,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0xFF}, 20),
 	}
 	// Use an invalid signature.
 	invalidSig := [96]byte{1}
@@ -463,6 +485,42 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
 	require.ErrorContains(t, "bid signature validation failed", err)
 }
 
+func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
+	slot := primitives.Slot(9)
+	proposerIdx := primitives.ValidatorIndex(0)
+	builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
+	randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
+	latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
+	pubKey := [48]byte{}
+	state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
+
+	bid := &ethpb.ExecutionPayloadBid{
+		ParentBlockHash:    latestHash[:],
+		ParentBlockRoot:    bytes.Repeat([]byte{0xCC}, 32),
+		BlockHash:          bytes.Repeat([]byte{0xDD}, 32),
+		PrevRandao:         randao[:],
+		BuilderIndex:       builderIdx,
+		Slot:               slot,
+		BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
+		FeeRecipient:       bytes.Repeat([]byte{0xFF}, 20),
+	}
+	signed := &ethpb.SignedExecutionPayloadBid{
+		Message:   bid,
+		Signature: common.InfiniteSignature[:],
+	}
+
+	block := stubBlock{
+		slot:       slot,
+		proposer:   proposerIdx,
+		parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
+		body:       stubBlockBody{signedBid: signed},
+		v:          version.Gloas,
+	}
+
+	err := ProcessExecutionPayloadBid(state, block)
+	require.ErrorContains(t, "blob KZG commitments over max", err)
+}
+
 func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
 	slot := primitives.Slot(10)
 	builderIdx := primitives.BuilderIndex(1)
|
|||||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||||
|
|
||||||
bid := ðpb.ExecutionPayloadBid{
|
bid := ðpb.ExecutionPayloadBid{
|
||||||
ParentBlockHash: latestHash[:],
|
ParentBlockHash: latestHash[:],
|
||||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||||
PrevRandao: randao[:],
|
PrevRandao: randao[:],
|
||||||
GasLimit: 1,
|
GasLimit: 1,
|
||||||
BuilderIndex: builderIdx,
|
BuilderIndex: builderIdx,
|
||||||
Slot: slot + 1, // mismatch
|
Slot: slot + 1, // mismatch
|
||||||
Value: 1,
|
Value: 1,
|
||||||
ExecutionPayment: 0,
|
ExecutionPayment: 0,
|
||||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
|
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||||
}
|
}
|
||||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||||
@@ -520,17 +578,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
 	state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
 
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  bytes.Repeat([]byte{0x11}, 32), // mismatch
 		ParentBlockRoot:  bytes.Repeat([]byte{0x22}, 32),
 		BlockHash:        bytes.Repeat([]byte{0x33}, 32),
 		PrevRandao:       randao[:],
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            1,
 		ExecutionPayment: 0,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0x55}, 20),
 	}
 	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
 	sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -563,17 +621,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
 
 	parentRoot := bytes.Repeat([]byte{0x22}, 32)
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  latestHash[:],
 		ParentBlockRoot:  parentRoot,
 		BlockHash:        bytes.Repeat([]byte{0x33}, 32),
 		PrevRandao:       randao[:],
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            1,
 		ExecutionPayment: 0,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0x55}, 20),
 	}
 	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
 	sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -605,17 +663,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
 	state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
 
 	bid := &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:  latestHash[:],
 		ParentBlockRoot:  bytes.Repeat([]byte{0x22}, 32),
 		BlockHash:        bytes.Repeat([]byte{0x33}, 32),
 		PrevRandao:       bytes.Repeat([]byte{0x01}, 32), // mismatch
 		GasLimit:         1,
 		BuilderIndex:     builderIdx,
 		Slot:             slot,
 		Value:            1,
 		ExecutionPayment: 0,
-		BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
+		BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
 		FeeRecipient:     bytes.Repeat([]byte{0x55}, 20),
 	}
 	genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
 	sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -24,14 +24,21 @@ import (
 )
 
 // ProcessPayloadAttestations validates payload attestations in a block body.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
 //
-// data = payload_attestation.data
-// assert data.beacon_block_root == state.latest_block_header.parent_root
-// assert data.slot + 1 == state.slot
-// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
-// assert is_valid_indexed_payload_attestation(state, indexed)
+// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
+// def process_payload_attestation(
+//     state: BeaconState, payload_attestation: PayloadAttestation
+// ) -> None:
+//     data = payload_attestation.data
+//     # Check that the attestation is for the parent beacon block
+//     assert data.beacon_block_root == state.latest_block_header.parent_root
+//     # Check that the attestation is for the previous slot
+//     assert data.slot + 1 == state.slot
+//     # Verify signature
+//     indexed_payload_attestation = get_indexed_payload_attestation(state, payload_attestation)
+//     assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
+// </spec>
 func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
 	atts, err := body.PayloadAttestations()
 	if err != nil {
@@ -90,17 +97,24 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
 }
 
 // payloadCommittee returns the payload timeliness committee for a given slot for the state.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
 //
-// epoch = compute_epoch_at_slot(slot)
-// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
-// indices = []
-// committees_per_slot = get_committee_count_per_slot(state, epoch)
-// for i in range(committees_per_slot):
-//     committee = get_beacon_committee(state, slot, CommitteeIndex(i))
-//     indices.extend(committee)
-// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
+// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
+// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
+//     """
+//     Get the payload timeliness committee for the given ``slot``.
+//     """
+//     epoch = compute_epoch_at_slot(slot)
+//     seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
+//     indices: List[ValidatorIndex] = []
+//     # Concatenate all committees for this slot in order
+//     committees_per_slot = get_committee_count_per_slot(state, epoch)
+//     for i in range(committees_per_slot):
+//         committee = get_beacon_committee(state, slot, CommitteeIndex(i))
+//         indices.extend(committee)
+//     return compute_balance_weighted_selection(
+//         state, indices, seed, size=PTC_SIZE, shuffle_indices=False
+//     )
+// </spec>
 func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
 	epoch := slots.ToEpoch(slot)
 	seed, err := ptcSeed(st, epoch, slot)
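The seed derivation in get_ptc is a single hash over the domain seed concatenated with the little-endian slot. A standalone sketch of that step (the 32-byte domain seed is passed in as an assumption; in the real code it comes from get_seed with DOMAIN_PTC_ATTESTER):

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"
    )

    // ptcSeed mirrors hash(get_seed(...) + uint_to_bytes(slot)) from the spec.
    func ptcSeed(domainSeed [32]byte, slot uint64) [32]byte {
        buf := make([]byte, 40)
        copy(buf[:32], domainSeed[:])
        binary.LittleEndian.PutUint64(buf[32:], slot) // uint_to_bytes is little-endian
        return sha256.Sum256(buf)
    }

    func main() {
        var seed [32]byte
        fmt.Printf("%x\n", ptcSeed(seed, 42))
    }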
@@ -152,17 +166,35 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
 }
 
 // selectByBalance selects a balance-weighted subset of input candidates.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
-// Note: shuffle_indices is false for PTC.
 //
-// total = len(indices); selected = []; i = 0
-// while len(selected) < size:
-//     next = i % total
-//     if shuffle_indices: next = compute_shuffled_index(next, total, seed)
-//     if compute_balance_weighted_acceptance(state, indices[next], seed, i):
-//         selected.append(indices[next])
-//     i += 1
+// <spec fn="compute_balance_weighted_selection" fork="gloas" hash="2c9f1c23">
+// def compute_balance_weighted_selection(
+//     state: BeaconState,
+//     indices: Sequence[ValidatorIndex],
+//     seed: Bytes32,
+//     size: uint64,
+//     shuffle_indices: bool,
+// ) -> Sequence[ValidatorIndex]:
+//     """
+//     Return ``size`` indices sampled by effective balance, using ``indices``
+//     as candidates. If ``shuffle_indices`` is ``True``, candidate indices
+//     are themselves sampled from ``indices`` by shuffling it, otherwise
+//     ``indices`` is traversed in order.
+//     """
+//     total = uint64(len(indices))
+//     assert total > 0
+//     selected: List[ValidatorIndex] = []
+//     i = uint64(0)
+//     while len(selected) < size:
+//         next_index = i % total
+//         if shuffle_indices:
+//             next_index = compute_shuffled_index(next_index, total, seed)
+//         candidate_index = indices[next_index]
+//         if compute_balance_weighted_acceptance(state, candidate_index, seed, i):
+//             selected.append(candidate_index)
+//         i += 1
+//     return selected
+// </spec>
 func selectByBalanceFill(
 	ctx context.Context,
 	st state.ReadOnlyBeaconState,
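The selection loop walks the candidate list round-robin (the PTC passes shuffle_indices=False) and keeps candidates that pass the balance-weighted acceptance test until the committee is full. A sketch with the acceptance test injected as a callback (names are illustrative, not the package's API):

    package main

    import "fmt"

    // selectWeighted mirrors compute_balance_weighted_selection with
    // shuffle_indices=False: candidates are traversed in order, wrapping
    // around until `size` of them have been accepted.
    func selectWeighted(indices []uint64, size int, accept func(candidate, round uint64) bool) []uint64 {
        total := uint64(len(indices))
        selected := make([]uint64, 0, size)
        for i := uint64(0); len(selected) < size; i++ {
            candidate := indices[i%total]
            if accept(candidate, i) {
                selected = append(selected, candidate)
            }
        }
        return selected
    }

    func main() {
        // Toy acceptance: accept every other round.
        out := selectWeighted([]uint64{10, 11, 12}, 2, func(_, round uint64) bool { return round%2 == 0 })
        fmt.Println(out) // [10 12]
    }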
@@ -199,15 +231,22 @@ func selectByBalanceFill(
 }
 
 // acceptByBalance determines if a validator is accepted based on its effective balance.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// compute_balance_weighted_acceptance(state, index, seed, i):
 //
-// MAX_RANDOM_VALUE = 2**16 - 1
-// random_bytes = hash(seed + uint_to_bytes(i // 16))
-// offset = i % 16 * 2
-// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
-// effective_balance = state.validators[index].effective_balance
-// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
+// <spec fn="compute_balance_weighted_acceptance" fork="gloas" hash="9954dcd0">
+// def compute_balance_weighted_acceptance(
+//     state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64
+// ) -> bool:
+//     """
+//     Return whether to accept the selection of the validator ``index``, with probability
+//     proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``.
+//     """
+//     MAX_RANDOM_VALUE = 2**16 - 1
+//     random_bytes = hash(seed + uint_to_bytes(i // 16))
+//     offset = i % 16 * 2
+//     random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
+//     effective_balance = state.validators[index].effective_balance
+//     return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
+// </spec>
 func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
 	// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
 	binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
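The acceptance test draws a 16-bit random value from the hashed seed (two bytes per round, sixteen rounds per hash) and accepts with probability effective_balance / MAX_EFFECTIVE_BALANCE_ELECTRA. A standalone sketch of the arithmetic (the max-balance constant is written out as an assumption, 2048 ETH in Gwei):

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"
    )

    const (
        maxRandomValue      = 1<<16 - 1         // MAX_RANDOM_VALUE
        maxEffectiveBalance = 2_048_000_000_000 // assumed MAX_EFFECTIVE_BALANCE_ELECTRA (Gwei)
    )

    // accept mirrors compute_balance_weighted_acceptance: round i reads two
    // bytes of hash(seed || uint_to_bytes(i/16)) at offset (i%16)*2.
    func accept(seed [32]byte, i, effectiveBalance uint64) bool {
        buf := make([]byte, 40)
        copy(buf[:32], seed[:])
        binary.LittleEndian.PutUint64(buf[32:], i/16)
        h := sha256.Sum256(buf)
        offset := (i % 16) * 2
        randomValue := uint64(binary.LittleEndian.Uint16(h[offset : offset+2]))
        return effectiveBalance*maxRandomValue >= maxEffectiveBalance*randomValue
    }

    func main() {
        var seed [32]byte
        fmt.Println(accept(seed, 0, 32_000_000_000)) // a 32 ETH validator, round 0
    }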
@@ -224,16 +263,26 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
 }
 
 // validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
 //
-// indices = indexed_payload_attestation.attesting_indices
-// return len(indices) > 0 and indices == sorted(indices) and
-//     bls.FastAggregateVerify(
-//         [state.validators[i].pubkey for i in indices],
-//         compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
-//         indexed_payload_attestation.signature,
-//     )
+// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
+// def is_valid_indexed_payload_attestation(
+//     state: BeaconState, attestation: IndexedPayloadAttestation
+// ) -> bool:
+//     """
+//     Check if ``attestation`` is non-empty, has sorted indices, and has
+//     a valid aggregate signature.
+//     """
+//     # Verify indices are non-empty and sorted
+//     indices = attestation.attesting_indices
+//     if len(indices) == 0 or not indices == sorted(indices):
+//         return False
+//
+//     # Verify aggregate signature
+//     pubkeys = [state.validators[i].pubkey for i in indices]
+//     domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
+//     signing_root = compute_signing_root(attestation.data, domain)
+//     return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
+// </spec>
 func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
 	indices := att.AttestingIndices
 	if len(indices) == 0 || !slices.IsSorted(indices) {
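As the excerpt under the spec comment shows, the Go side runs the cheap structural checks before any BLS work. A standalone sketch of that precondition using only the standard library:

    package main

    import (
        "fmt"
        "slices"
    )

    // validIndices mirrors the spec's first gate: attesting_indices must be
    // non-empty and sorted before the aggregate signature is even checked.
    func validIndices(indices []uint64) bool {
        return len(indices) > 0 && slices.IsSorted(indices)
    }

    func main() {
        fmt.Println(validIndices([]uint64{1, 2, 3})) // true
        fmt.Println(validIndices([]uint64{2, 1}))    // false
        fmt.Println(validIndices(nil))               // false
    }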
@@ -10,17 +10,21 @@ import (
 )
 
 // ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// def process_builder_pending_payments(state: BeaconState) -> None:
 //
-// quorum = get_builder_payment_quorum_threshold(state)
-// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
-//     if payment.weight >= quorum:
-//         state.builder_pending_withdrawals.append(payment.withdrawal)
+// <spec fn="process_builder_pending_payments" fork="gloas" hash="10da48dd">
+// def process_builder_pending_payments(state: BeaconState) -> None:
+//     """
+//     Processes the builder pending payments from the previous epoch.
+//     """
+//     quorum = get_builder_payment_quorum_threshold(state)
+//     for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
+//         if payment.weight >= quorum:
+//             state.builder_pending_withdrawals.append(payment.withdrawal)
 //
-//     old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
-//     new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
-//     state.builder_pending_payments = old_payments + new_payments
+//     old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
+//     new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
+//     state.builder_pending_payments = old_payments + new_payments
+// </spec>
 func ProcessBuilderPendingPayments(state state.BeaconState) error {
 	quorum, err := builderQuorumThreshold(state)
 	if err != nil {
@@ -53,12 +57,16 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
 }
 
 // builderQuorumThreshold calculates the quorum threshold for builder payments.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
 //
-// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
-// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
-// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
+// <spec fn="get_builder_payment_quorum_threshold" fork="gloas" hash="a64b7ffb">
+// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
+//     """
+//     Calculate the quorum threshold for builder payments.
+//     """
+//     per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
+//     quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
+//     return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
+// </spec>
 func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
 	activeBalance, err := helpers.TotalActiveBalance(state)
 	if err != nil {
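The quorum arithmetic is one division and one fraction over per-slot stake. A standalone sketch (the numerator/denominator values are placeholders; the real BUILDER_PAYMENT_THRESHOLD_* constants belong to the gloas config, which the exceptions list marks as not yet implemented):

    package main

    import "fmt"

    const (
        slotsPerEpoch        = 32
        thresholdNumerator   = 6  // placeholder for BUILDER_PAYMENT_THRESHOLD_NUMERATOR
        thresholdDenominator = 10 // placeholder for BUILDER_PAYMENT_THRESHOLD_DENOMINATOR
    )

    // quorumThreshold mirrors get_builder_payment_quorum_threshold.
    func quorumThreshold(totalActiveBalanceGwei uint64) uint64 {
        perSlotBalance := totalActiveBalanceGwei / slotsPerEpoch
        return perSlotBalance * thresholdNumerator / thresholdDenominator
    }

    func main() {
        // 1,000,000 ETH of active stake, in Gwei.
        fmt.Println(quorumThreshold(1_000_000_000_000_000))
    }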
@@ -11,16 +11,20 @@ import (
 )
 
 // RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
-// Spec v1.7.0 (pseudocode):
 //
+// <spec fn="process_proposer_slashing" fork="gloas" lines="22-32" hash="4da721ef">
+// # [New in Gloas:EIP7732]
+// # Remove the BuilderPendingPayment corresponding to
+// # this proposal if it is still in the 2-epoch window.
 // slot = header_1.slot
 // proposal_epoch = compute_epoch_at_slot(slot)
 // if proposal_epoch == get_current_epoch(state):
 //     payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
 //     state.builder_pending_payments[payment_index] = BuilderPendingPayment()
 // elif proposal_epoch == get_previous_epoch(state):
 //     payment_index = slot % SLOTS_PER_EPOCH
 //     state.builder_pending_payments[payment_index] = BuilderPendingPayment()
+// </spec>
 func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
 	proposalEpoch := slots.ToEpoch(header.Slot)
 	currentEpoch := time.CurrentEpoch(st)
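The pending-payments buffer holds two epochs of slots, previous epoch in the first half and current epoch in the second, which is why the slashing path above clears one of two indices. A standalone sketch of the index math:

    package main

    import "fmt"

    const slotsPerEpoch = 32

    // paymentIndex mirrors the spec: current-epoch proposals map into the
    // second half of builder_pending_payments, previous-epoch proposals into
    // the first half; anything older is outside the 2-epoch window.
    func paymentIndex(slot, currentEpoch uint64) (uint64, bool) {
        proposalEpoch := slot / slotsPerEpoch
        switch {
        case proposalEpoch == currentEpoch:
            return slotsPerEpoch + slot%slotsPerEpoch, true
        case currentEpoch > 0 && proposalEpoch == currentEpoch-1:
            return slot % slotsPerEpoch, true
        default:
            return 0, false
        }
    }

    func main() {
        idx, ok := paymentIndex(70, 2) // slot 70 sits in epoch 2
        fmt.Println(idx, ok)           // 38 true
    }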
@@ -33,7 +33,6 @@ go_library(
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prometheus_client_golang//prometheus:go_default_library",
         "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
         "@org_golang_x_sync//errgroup:go_default_library",
     ],
 )
@@ -1,15 +1,11 @@
 package peerdas
 
 import (
-	stderrors "errors"
-	"iter"
-
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/container/trie"
-	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/ethereum/go-ethereum/p2p/enr"
 	"github.com/pkg/errors"
 )
@@ -20,7 +16,6 @@ var (
 	ErrIndexTooLarge         = errors.New("column index is larger than the specified columns count")
 	ErrNoKzgCommitments      = errors.New("no KZG commitments found")
 	ErrMismatchLength        = errors.New("mismatch in the length of the column, commitments or proofs")
-	ErrEmptySegment          = errors.New("empty segment in batch")
 	ErrInvalidKZGProof       = errors.New("invalid KZG proof")
 	ErrBadRootLength         = errors.New("bad root length")
 	ErrInvalidInclusionProof = errors.New("invalid inclusion proof")
@@ -62,127 +57,62 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
 	return nil
 }
 
-// CellProofBundleSegment is returned when a batch fails. The caller can call
-// the `.Verify` method to verify just this segment.
-type CellProofBundleSegment struct {
-	indices     []uint64
-	commitments []kzg.Bytes48
-	cells       []kzg.Cell
-	proofs      []kzg.Bytes48
-}
-
-// Verify verifies this segment without batching.
-func (s CellProofBundleSegment) Verify() error {
-	if len(s.cells) == 0 {
-		return ErrEmptySegment
-	}
-	verified, err := kzg.VerifyCellKZGProofBatch(s.commitments, s.indices, s.cells, s.proofs)
-	if err != nil {
-		return stderrors.Join(err, ErrInvalidKZGProof)
-	}
-	if !verified {
-		return ErrInvalidKZGProof
-	}
-	return nil
-}
-
-func VerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIter iter.Seq[blocks.CellProofBundle]) error {
-	// ignore the failed segment list since we are just passing in one segment.
-	_, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, []iter.Seq[blocks.CellProofBundle]{cellProofsIter})
-	return err
-}
-
-// BatchVerifyDataColumnsCellsKZGProofs verifies if the KZG proofs are correct.
+// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
 // Note: We are slightly deviating from the specification here:
 // The specification verifies the KZG proofs for each sidecar separately,
 // while we are verifying all the KZG proofs from multiple sidecars in a batch.
 // This is done to improve performance since the internal KZG library is way more
-// efficient when verifying in batch. If the batch fails, the failed segments
-// are returned to the caller so that they may try segment by segment without
-// batching. On success the failed segment list is empty.
-//
+// efficient when verifying in batch.
 //
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
-func BatchVerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) ( /* failed segment list */ []CellProofBundleSegment, error) {
-	commitments := make([]kzg.Bytes48, 0, sizeHint)
-	indices := make([]uint64, 0, sizeHint)
-	cells := make([]kzg.Cell, 0, sizeHint)
-	proofs := make([]kzg.Bytes48, 0, sizeHint)
-
-	var anySegmentEmpty bool
-	var segments []CellProofBundleSegment
-	for _, cellProofsIter := range cellProofsIters {
-		startIdx := len(cells)
-		for bundle := range cellProofsIter {
+func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
+	// Compute the total count.
+	count := 0
+	for _, sidecar := range sidecars {
+		count += len(sidecar.Column)
+	}
+
+	commitments := make([]kzg.Bytes48, 0, count)
+	indices := make([]uint64, 0, count)
+	cells := make([]kzg.Cell, 0, count)
+	proofs := make([]kzg.Bytes48, 0, count)
+
+	for _, sidecar := range sidecars {
+		for i := range sidecar.Column {
 			var (
 				commitment kzg.Bytes48
 				cell       kzg.Cell
 				proof      kzg.Bytes48
 			)
-			if len(bundle.Commitment) != len(commitment) ||
-				len(bundle.Cell) != len(cell) ||
-				len(bundle.Proof) != len(proof) {
-				return nil, ErrMismatchLength
+			commitmentBytes := sidecar.KzgCommitments[i]
+			cellBytes := sidecar.Column[i]
+			proofBytes := sidecar.KzgProofs[i]
+
+			if len(commitmentBytes) != len(commitment) ||
+				len(cellBytes) != len(cell) ||
+				len(proofBytes) != len(proof) {
+				return ErrMismatchLength
 			}
-			copy(commitment[:], bundle.Commitment)
-			copy(cell[:], bundle.Cell)
-			copy(proof[:], bundle.Proof)
+			copy(commitment[:], commitmentBytes)
+			copy(cell[:], cellBytes)
+			copy(proof[:], proofBytes)
 			commitments = append(commitments, commitment)
-			indices = append(indices, bundle.ColumnIndex)
+			indices = append(indices, sidecar.Index)
 			cells = append(cells, cell)
 			proofs = append(proofs, proof)
 		}
-		if len(cells[startIdx:]) == 0 {
-			anySegmentEmpty = true
-		}
-		segments = append(segments, CellProofBundleSegment{
-			indices:     indices[startIdx:],
-			commitments: commitments[startIdx:],
-			cells:       cells[startIdx:],
-			proofs:      proofs[startIdx:],
-		})
-	}
-
-	if anySegmentEmpty {
-		return segments, ErrEmptySegment
 	}
 
 	// Batch verify that the cells match the corresponding commitments and proofs.
 	verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
 	if err != nil {
-		return segments, stderrors.Join(err, ErrInvalidKZGProof)
+		return errors.Wrap(err, "verify cell KZG proof batch")
 	}
 
 	if !verified {
-		return segments, ErrInvalidKZGProof
-	}
-
-	return nil, nil
-}
-
-// verifyKzgCommitmentsInclusionProof is the shared implementation for inclusion proof verification.
-func verifyKzgCommitmentsInclusionProof(bodyRoot []byte, kzgCommitments [][]byte, inclusionProof [][]byte) error {
-	if len(bodyRoot) != fieldparams.RootLength {
-		return ErrBadRootLength
-	}
-
-	leaves := blocks.LeavesFromCommitments(kzgCommitments)
-
-	sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
-	if err != nil {
-		return errors.Wrap(err, "generate trie from items")
-	}
-
-	hashTreeRoot, err := sparse.HashTreeRoot()
-	if err != nil {
-		return errors.Wrap(err, "hash tree root")
-	}
-
-	verified := trie.VerifyMerkleProof(bodyRoot, hashTreeRoot[:], kzgPosition, inclusionProof)
-	if !verified {
-		return ErrInvalidInclusionProof
+		return ErrInvalidKZGProof
 	}
 
 	return nil
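After this revert a caller hands the whole sidecar slice to one call and gets a single pass/fail, with no per-segment retry list. A hedged usage fragment (not a complete program; the sidecar slice and the surrounding function are assumed to exist at the call site):

    // Assuming sidecars []blocks.RODataColumn was built by the caller.
    if err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars); err != nil {
        // One bad cell/commitment/proof triple fails the whole batch.
        return errors.Wrap(err, "data column KZG verification")
    }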
@@ -194,23 +124,30 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
 	if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
 		return ErrNilBlockHeader
 	}
-	return verifyKzgCommitmentsInclusionProof(
-		sidecar.SignedBlockHeader.Header.BodyRoot,
-		sidecar.KzgCommitments,
-		sidecar.KzgCommitmentsInclusionProof,
-	)
-}
-
-// VerifyPartialDataColumnHeaderInclusionProof verifies if the KZG commitments are included in the beacon block.
-func VerifyPartialDataColumnHeaderInclusionProof(header *ethpb.PartialDataColumnHeader) error {
-	if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
-		return ErrNilBlockHeader
-	}
-	return verifyKzgCommitmentsInclusionProof(
-		header.SignedBlockHeader.Header.BodyRoot,
-		header.KzgCommitments,
-		header.KzgCommitmentsInclusionProof,
-	)
+	root := sidecar.SignedBlockHeader.Header.BodyRoot
+	if len(root) != fieldparams.RootLength {
+		return ErrBadRootLength
+	}
+
+	leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)
+
+	sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
+	if err != nil {
+		return errors.Wrap(err, "generate trie from items")
+	}
+
+	hashTreeRoot, err := sparse.HashTreeRoot()
+	if err != nil {
+		return errors.Wrap(err, "hash tree root")
+	}
+
+	verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
+	if !verified {
+		return ErrInvalidInclusionProof
+	}
+
+	return nil
 }
 
 // ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
@@ -3,7 +3,6 @@ package peerdas_test
 import (
 	"crypto/rand"
 	"fmt"
-	"iter"
 	"testing"
 
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
@@ -73,7 +72,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
|||||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||||
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
|
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
|
||||||
|
|
||||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
|
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||||
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
|
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -81,15 +80,14 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
|||||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||||
sidecars[0].Column[0][0]++ // It is OK to overflow
|
sidecars[0].Column[0][0]++ // It is OK to overflow
|
||||||
|
|
||||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
|
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||||
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
|
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("nominal", func(t *testing.T) {
|
t.Run("nominal", func(t *testing.T) {
|
||||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||||
failedSegments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(blobCount, []iter.Seq[blocks.CellProofBundle]{blocks.RODataColumnsToCellProofBundles(sidecars)})
|
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 0, len(failedSegments))
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -275,7 +273,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
|
|||||||
for _, sidecar := range sidecars {
|
for _, sidecar := range sidecars {
|
||||||
sidecars := []blocks.RODataColumn{sidecar}
|
sidecars := []blocks.RODataColumn{sidecar}
|
||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
|
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
require.NoError(b, err)
|
require.NoError(b, err)
|
||||||
}
|
}
|
||||||
@@ -310,7 +308,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
|
|||||||
}
|
}
|
||||||
|
|
||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(allSidecars))
|
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
|
||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
require.NoError(b, err)
|
require.NoError(b, err)
|
||||||
}
|
}
|
||||||
@@ -343,7 +341,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
|
|||||||
|
|
||||||
for _, sidecars := range allSidecars {
|
for _, sidecars := range allSidecars {
|
||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(len(allSidecars), blocks.RODataColumnsToCellProofBundles(sidecars))
|
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||||
b.StopTimer()
|
b.StopTimer()
|
||||||
require.NoError(b, err)
|
require.NoError(b, err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ import (
     "sync"
     "time"

-    "github.com/OffchainLabs/go-bitfield"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
     fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
     "github.com/OffchainLabs/prysm/v7/config/params"
@@ -340,8 +339,7 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
 }

 // ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
-// commitmentCount is required to return the correct sized bitlist even if we see a nil slice of blobsAndProofs.
-func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2) (bitfield.Bitlist /* parts included */, [][]kzg.Cell, [][]kzg.Proof, error) {
+func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
     start := time.Now()
     defer func() {
         cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -349,24 +347,14 @@ func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs

     var wg errgroup.Group

-    var blobsPresent int
-    for _, blobAndProof := range blobsAndProofs {
-        if blobAndProof != nil {
-            blobsPresent++
-        }
-    }
-    cellsPerBlob := make([][]kzg.Cell, blobsPresent)
-    proofsPerBlob := make([][]kzg.Proof, blobsPresent)
-    included := bitfield.NewBitlist(commitmentCount)
-
-    var j int
+    cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
+    proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
     for i, blobAndProof := range blobsAndProofs {
         if blobAndProof == nil {
-            continue
+            return nil, nil, ErrNilBlobAndProof
         }
-        included.SetBitAt(uint64(i), true)
-
-        compactIndex := j
         wg.Go(func() error {
             var kzgBlob kzg.Blob
             if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
@@ -393,18 +381,17 @@ func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs
                 kzgProofs = append(kzgProofs, kzgProof)
             }

-            cellsPerBlob[compactIndex] = cells
-            proofsPerBlob[compactIndex] = kzgProofs
+            cellsPerBlob[i] = cells
+            proofsPerBlob[i] = kzgProofs
             return nil
         })
-        j++
     }

     if err := wg.Wait(); err != nil {
-        return nil, nil, nil, err
+        return nil, nil, err
     }

-    return included, cellsPerBlob, proofsPerBlob, nil
+    return cellsPerBlob, proofsPerBlob, nil
 }

 // ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.
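With the bitlist removed, ComputeCellsAndProofsFromStructured is all-or-nothing again: a single nil entry aborts the call instead of being skipped and tracked per commitment. A hedged sketch of the resulting caller contract (the fallback comment is illustrative, not part of the diff):

    // Every BlobAndProofV2 must be non-nil; partial EL responses are no longer accepted here.
    cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
    if errors.Is(err, peerdas.ErrNilBlobAndProof) {
        // Illustrative: the EL response was incomplete, so fetch the data from peers instead.
        return nil, nil, err
    }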
@@ -479,9 +479,8 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {

 func TestComputeCellsAndProofsFromStructured(t *testing.T) {
     t.Run("nil blob and proof", func(t *testing.T) {
-        included, _, _, err := peerdas.ComputeCellsAndProofsFromStructured(0, []*pb.BlobAndProofV2{nil})
-        require.NoError(t, err)
-        require.Equal(t, uint64(0), included.Count())
+        _, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
+        require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
     })

     t.Run("nominal", func(t *testing.T) {
@@ -534,8 +533,7 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
     require.NoError(t, err)

     // Test ComputeCellsAndProofs
-    included, actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(blobsAndProofs)), blobsAndProofs)
-    require.Equal(t, included.Count(), uint64(len(actualCellsPerBlob)))
+    actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
     require.NoError(t, err)
     require.Equal(t, blobCount, len(actualCellsPerBlob))
@@ -3,7 +3,6 @@ package peerdas
 import (
     "time"

-    "github.com/OffchainLabs/go-bitfield"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
     beaconState "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
     fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -24,13 +23,11 @@ var (
 var (
     _ ConstructionPopulator = (*BlockReconstructionSource)(nil)
     _ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
-    _ ConstructionPopulator = (*PartialDataColumnHeaderReconstructionSource)(nil)
 )

 const (
     BlockType   = "BeaconBlock"
     SidecarType = "DataColumnSidecar"
-    PartialDataColumnHeaderType = "PartialDataColumnHeader"
 )

 type (
@@ -57,10 +54,6 @@ type (
         blocks.VerifiedRODataColumn
     }

-    PartialDataColumnHeaderReconstructionSource struct {
-        *ethpb.PartialDataColumnHeader
-    }
-
     blockInfo struct {
         signedBlockHeader *ethpb.SignedBeaconBlockHeader
         kzgCommitments    [][]byte
@@ -78,11 +71,6 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
     return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
 }

-// PopulateFromPartialHeader creates a PartialDataColumnHeaderReconstructionSource from a partial header
-func PopulateFromPartialHeader(header *ethpb.PartialDataColumnHeader) *PartialDataColumnHeaderReconstructionSource {
-    return &PartialDataColumnHeaderReconstructionSource{PartialDataColumnHeader: header}
-}
-
 // ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
 func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -155,40 +143,6 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
     return roSidecars, nil
 }

-func PartialColumns(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.PartialDataColumn, error) {
-    start := time.Now()
-    const numberOfColumns = uint64(fieldparams.NumberOfColumns)
-    cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
-    if err != nil {
-        return nil, errors.Wrap(err, "rotate cells and proofs")
-    }
-    info, err := src.extract()
-    if err != nil {
-        return nil, errors.Wrap(err, "extract block info")
-    }
-
-    dataColumns := make([]blocks.PartialDataColumn, 0, numberOfColumns)
-    for idx := range numberOfColumns {
-        dc, err := blocks.NewPartialDataColumn(info.signedBlockHeader, idx, info.kzgCommitments, info.kzgInclusionProof)
-        if err != nil {
-            return nil, errors.Wrap(err, "new ro data column")
-        }
-
-        for i := range len(info.kzgCommitments) {
-            if !included.BitAt(uint64(i)) {
-                continue
-            }
-            dc.ExtendFromVerfifiedCell(uint64(i), cells[idx][0], proofs[idx][0])
-            cells[idx] = cells[idx][1:]
-            proofs[idx] = proofs[idx][1:]
-        }
-        dataColumns = append(dataColumns, dc)
-    }
-
-    dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
-    return dataColumns, nil
-}
-
 // Slot returns the slot of the source
 func (s *BlockReconstructionSource) Slot() primitives.Slot {
     return s.Block().Slot()
@@ -300,43 +254,3 @@ func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {

     return info, nil
 }
-
-// Slot returns the slot from the partial data column header
-func (p *PartialDataColumnHeaderReconstructionSource) Slot() primitives.Slot {
-    return p.SignedBlockHeader.Header.Slot
-}
-
-// Root returns the block root computed from the header
-func (p *PartialDataColumnHeaderReconstructionSource) Root() [fieldparams.RootLength]byte {
-    root, err := p.SignedBlockHeader.Header.HashTreeRoot()
-    if err != nil {
-        return [fieldparams.RootLength]byte{}
-    }
-    return root
-}
-
-// ProposerIndex returns the proposer index from the header
-func (p *PartialDataColumnHeaderReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
-    return p.SignedBlockHeader.Header.ProposerIndex
-}
-
-// Commitments returns the KZG commitments from the header
-func (p *PartialDataColumnHeaderReconstructionSource) Commitments() ([][]byte, error) {
-    return p.KzgCommitments, nil
-}
-
-// Type returns the type of the source
-func (p *PartialDataColumnHeaderReconstructionSource) Type() string {
-    return PartialDataColumnHeaderType
-}
-
-// extract extracts the block information from the partial header
-func (p *PartialDataColumnHeaderReconstructionSource) extract() (*blockInfo, error) {
-    info := &blockInfo{
-        signedBlockHeader: p.SignedBlockHeader,
-        kzgCommitments:    p.KzgCommitments,
-        kzgInclusionProof: p.KzgCommitmentsInclusionProof,
-    }
-
-    return info, nil
-}
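After this removal only two ConstructionPopulator sources remain, built from either a full signed block or an already-verified sidecar. A hedged usage sketch (PopulateFromBlock appears in the tests below; the engine variable is illustrative):

    // Choose a reconstruction source for sidecar construction.
    var src peerdas.ConstructionPopulator = peerdas.PopulateFromBlock(roBlock)
    // Alternatively, when only a verified sidecar is at hand:
    // src = peerdas.PopulateFromSidecar(verifiedSidecar)
    sidecars, err := engine.ConstructDataColumnSidecars(ctx, src)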
@@ -267,31 +267,4 @@ func TestReconstructionSource(t *testing.T) {

         require.Equal(t, peerdas.SidecarType, src.Type())
     })
-
-    t.Run("from partial header", func(t *testing.T) {
-        referenceSidecar := sidecars[0]
-        partialHeader := &ethpb.PartialDataColumnHeader{
-            SignedBlockHeader:            referenceSidecar.SignedBlockHeader,
-            KzgCommitments:               referenceSidecar.KzgCommitments,
-            KzgCommitmentsInclusionProof: referenceSidecar.KzgCommitmentsInclusionProof,
-        }
-
-        src := peerdas.PopulateFromPartialHeader(partialHeader)
-        require.Equal(t, referenceSidecar.SignedBlockHeader.Header.Slot, src.Slot())
-
-        // Compute expected root
-        expectedRoot, err := referenceSidecar.SignedBlockHeader.Header.HashTreeRoot()
-        require.NoError(t, err)
-        require.Equal(t, expectedRoot, src.Root())
-
-        require.Equal(t, referenceSidecar.SignedBlockHeader.Header.ProposerIndex, src.ProposerIndex())
-
-        commitments, err := src.Commitments()
-        require.NoError(t, err)
-        require.Equal(t, 2, len(commitments))
-        require.DeepEqual(t, commitment1, commitments[0])
-        require.DeepEqual(t, commitment2, commitments[1])
-
-        require.Equal(t, peerdas.PartialDataColumnHeaderType, src.Type())
-    })
 }
@@ -143,10 +143,11 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
         return nil, err
     }

-    // Spec v1.6.1 (pseudocode):
+    // <spec fn="process_slot" fork="gloas" lines="11-13" hash="62b28839">
     // # [New in Gloas:EIP7732]
     // # Unset the next payload availability
     // state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
+    // </spec>
     if state.Version() >= version.Gloas {
         index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
         if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
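The guarded branch is a direct translation of the quoted spec line: the availability bit for the next slot is cleared in a ring buffer of SLOTS_PER_HISTORICAL_ROOT entries, so the index simply wraps. A self-contained sketch of the arithmetic (the constant is the mainnet preset value; the slot is arbitrary):

    package main

    import "fmt"

    func main() {
        const slotsPerHistoricalRoot = 8192 // mainnet SLOTS_PER_HISTORICAL_ROOT
        slot := uint64(1_000_000)
        // The availability bit for slot+1 wraps around the fixed-size vector.
        index := (slot + 1) % slotsPerHistoricalRoot
        fmt.Println(index) // 577
    }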
@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
             BlockHash:    make([]byte, 32),
             PrevRandao:   make([]byte, 32),
             FeeRecipient: make([]byte, 20),
-            BlobKzgCommitmentsRoot: make([]byte, 32),
+            BlobKzgCommitments: [][]byte{make([]byte, 48)},
         },
         Eth1Data: &ethpb.Eth1Data{
             DepositRoot: make([]byte, 32),
@@ -2,6 +2,7 @@ package kv

 import (
     "context"
+    "slices"

     "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
     "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -33,6 +34,9 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
     if err := s.db.View(func(tx *bolt.Tx) error {
         bkt := tx.Bucket(stateSlotIndicesBucket)
         _, blockRoot = bkt.Cursor().Last()
+        if len(blockRoot) > 0 {
+            blockRoot = slices.Clone(blockRoot)
+        }
         return nil
     }); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
         panic(err) // lint:nopanic -- View never returns an error.
@@ -51,6 +55,9 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32
     if err := s.db.View(func(tx *bolt.Tx) error {
         bucket := tx.Bucket(stateSlotIndicesBucket)
         blockRoot = bucket.Get(bytesutil.SlotToBytesBigEndian(slot))
+        if len(blockRoot) > 0 {
+            blockRoot = slices.Clone(blockRoot)
+        }
         return nil
     }); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
         panic(err) // lint:nopanic -- View never returns an error.
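These new clones follow the bbolt contract: byte slices returned by Get or a Cursor point into the database's memory-mapped pages and are only valid while the transaction is open, so any value that escapes the View closure must be copied first. A minimal sketch of the pattern (bucket and key names are illustrative):

    import (
        "slices"

        bolt "go.etcd.io/bbolt"
    )

    // readValue copies a stored value out of the transaction before it closes.
    func readValue(db *bolt.DB) ([]byte, error) {
        var out []byte
        err := db.View(func(tx *bolt.Tx) error {
            raw := tx.Bucket([]byte("bucket")).Get([]byte("key"))
            if len(raw) > 0 {
                out = slices.Clone(raw) // raw becomes invalid once View returns
            }
            return nil
        })
        return out, err
    }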
@@ -812,7 +812,10 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
     var addr []byte
     err := s.db.View(func(tx *bolt.Tx) error {
         bkt := tx.Bucket(feeRecipientBucket)
-        addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
+        stored := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
+        if len(stored) > 0 {
+            addr = slices.Clone(stored)
+        }
         // IF the fee recipient is not found in the standard fee recipient bucket, then
         // check the registration bucket. The fee recipient may be there.
         // This is to resolve imcompatility until we fully migrate to the registration bucket.
@@ -826,7 +829,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
             if err := decode(ctx, enc, reg); err != nil {
                 return err
             }
-            addr = reg.FeeRecipient
+            addr = slices.Clone(reg.FeeRecipient)
         }
         return nil
     })
@@ -3,6 +3,7 @@ package kv
 import (
     "context"
     "fmt"
+    "slices"

     "github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
     "github.com/ethereum/go-ethereum/common"
@@ -17,7 +18,10 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
     var addr []byte
     if err := s.db.View(func(tx *bolt.Tx) error {
         chainInfo := tx.Bucket(chainMetadataBucket)
-        addr = chainInfo.Get(depositContractAddressKey)
+        stored := chainInfo.Get(depositContractAddressKey)
+        if len(stored) > 0 {
+            addr = slices.Clone(stored)
+        }
         return nil
     }); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
         panic(err) // lint:nopanic -- View never returns an error.
@@ -199,7 +199,7 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
 func stateBucketKeys(stateBucket *bolt.Bucket) ([][]byte, error) {
     var keys [][]byte
     if err := stateBucket.ForEach(func(pubKey, v []byte) error {
-        keys = append(keys, pubKey)
+        keys = append(keys, bytes.Clone(pubKey))
         return nil
     }); err != nil {
         return nil, err
@@ -2,6 +2,7 @@ package kv

 import (
     "context"
+    "slices"

     "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
     "github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
@@ -187,20 +188,23 @@ func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
             return bolt.ErrBucketNotFound
         }
         buf := append(key, stateSuffix...)
-        stateDiff = bucket.Get(buf)
-        if stateDiff == nil {
+        rawStateDiff := bucket.Get(buf)
+        if len(rawStateDiff) == 0 {
             return errors.New("state diff not found")
         }
+        stateDiff = slices.Clone(rawStateDiff)
         buf = append(key, validatorSuffix...)
-        validatorDiff = bucket.Get(buf)
-        if validatorDiff == nil {
+        rawValidatorDiff := bucket.Get(buf)
+        if len(rawValidatorDiff) == 0 {
             return errors.New("validator diff not found")
         }
+        validatorDiff = slices.Clone(rawValidatorDiff)
         buf = append(key, balancesSuffix...)
-        balancesDiff = bucket.Get(buf)
-        if balancesDiff == nil {
+        rawBalancesDiff := bucket.Get(buf)
+        if len(rawBalancesDiff) == 0 {
             return errors.New("balances diff not found")
         }
+        balancesDiff = slices.Clone(rawBalancesDiff)
         return nil
     })

@@ -224,10 +228,11 @@ func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
         if bucket == nil {
             return bolt.ErrBucketNotFound
         }
-        enc = bucket.Get(key)
-        if enc == nil {
+        rawEnc := bucket.Get(key)
+        if rawEnc == nil {
             return errors.New("state not found")
         }
+        enc = slices.Clone(rawEnc)
         return nil
     })

@@ -2,6 +2,7 @@ package kv

 import (
     "context"
+    "slices"

     "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
     "github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -47,7 +48,11 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
     }
     var enc []byte
     if err := s.db.View(func(tx *bolt.Tx) error {
-        enc = tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
+        rawEnc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
+        if len(rawEnc) == 0 {
+            return nil
+        }
+        enc = slices.Clone(rawEnc)
         return nil
     }); err != nil {
         return nil, err
@@ -25,6 +25,7 @@ go_library(
         "//testing/spectest:__subpackages__",
     ],
     deps = [
+        "//api/server/structs:go_default_library",
         "//beacon-chain/blockchain/kzg:go_default_library",
         "//beacon-chain/cache:go_default_library",
         "//beacon-chain/cache/depositsnapshot:go_default_library",
@@ -73,7 +74,6 @@ go_library(
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_k8s_client_go//tools/cache:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
@@ -100,6 +100,7 @@ go_test(
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
+        "//api/server/structs:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
@@ -7,7 +7,7 @@ import (
     "strings"
     "time"

-    "github.com/OffchainLabs/go-bitfield"
+    "github.com/OffchainLabs/prysm/v7/api/server/structs"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
@@ -59,7 +59,6 @@ var (
     fuluEngineEndpoints = []string{
         GetPayloadMethodV5,
         GetBlobsV2,
-        GetBlobsV3,
     }
 )

@@ -101,8 +100,8 @@ const (
     GetBlobsV1 = "engine_getBlobsV1"
     // GetBlobsV2 request string for JSON-RPC.
     GetBlobsV2 = "engine_getBlobsV2"
-    // GetBlobsV3 request string for JSON-RPC.
-    GetBlobsV3 = "engine_getBlobsV3"
+    // GetClientVersionV1 is the JSON-RPC method that identifies the execution client.
+    GetClientVersionV1 = "engine_getClientVersionV1"
     // Defines the seconds before timing out engine endpoints with non-block execution semantics.
     defaultEngineTimeout = time.Second
 )
@@ -126,7 +125,7 @@ type Reconstructor interface {
         ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
     ) ([]interfaces.SignedBeaconBlock, error)
     ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
-    ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error)
+    ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
 }

 // EngineCaller defines a client that can interact with an Ethereum
@@ -139,6 +138,7 @@ type EngineCaller interface {
     GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (*blocks.GetPayloadResponse, error)
     ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
     GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
+    GetClientVersionV1(ctx context.Context) ([]*structs.ClientVersionV1, error)
 }

 var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
@@ -557,19 +557,27 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
     return result, handleRPCError(err)
 }

-func (s *Service) GetBlobsV3(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
-    ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV3")
+func (s *Service) GetClientVersionV1(ctx context.Context) ([]*structs.ClientVersionV1, error) {
+    ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetClientVersionV1")
     defer span.End()
-    start := time.Now()

-    if !s.capabilityCache.has(GetBlobsV3) {
-        return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV3))
+    var result []*structs.ClientVersionV1
+    err := s.rpcClient.CallContext(
+        ctx,
+        &result,
+        GetClientVersionV1,
+        structs.ClientVersionV1{
+            Code:    "PM",
+            Name:    "Prysm",
+            Version: version.SemanticVersion(),
+            Commit:  version.GitCommit()[:8],
+        },
+    )
+
+    if len(result) == 0 {
+        return nil, errors.New("execution client returned no result")
     }

-    getBlobsV3RequestsTotal.Inc()
-    result := make([]*pb.BlobAndProofV2, len(versionedHashes))
-    err := s.rpcClient.CallContext(ctx, &result, GetBlobsV3, versionedHashes)
-    getBlobsV3Latency.Observe(time.Since(start).Seconds())
     return result, handleRPCError(err)
 }

@@ -683,47 +691,40 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
     return verifiedBlobs, nil
 }

-func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
+func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
     root := populator.Root()

     // Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
     commitments, err := populator.Commitments()
     if err != nil {
-        return nil, nil, wrapWithBlockRoot(err, root, "commitments")
+        return nil, wrapWithBlockRoot(err, root, "commitments")
     }

-    included, cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
-    log.Info("Received cells and proofs from execution client", "included", included, "cells count", len(cellsPerBlob), "err", err)
+    cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
     if err != nil {
-        return nil, nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
+        return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
     }

-    partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, populator)
-    haveAllBlobs := included.Count() == uint64(len(commitments))
-    log.Info("Constructed partial columns", "haveAllBlobs", haveAllBlobs)
-
-    if haveAllBlobs {
-        // Construct data column sidears from the signed block and cells and proofs.
-        roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
-        if err != nil {
-            return nil, nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
-        }
-
-        // Upgrade the sidecars to verified sidecars.
-        // We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
-        verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
-
-        return verifiedROSidecars, partialColumns, nil
+    // Return early if nothing is returned from the EL.
+    if len(cellsPerBlob) == 0 {
+        return nil, nil
     }

+    // Construct data column sidears from the signed block and cells and proofs.
+    roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
     if err != nil {
-        return nil, nil, wrapWithBlockRoot(err, populator.Root(), "partial columns from column sidecar")
+        return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
     }
-    return nil, partialColumns, nil
+
+    // Upgrade the sidecars to verified sidecars.
+    // We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
+    verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
+
+    return verifiedROSidecars, nil
 }

 // fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using engine_getBlobsV2 execution API method)
-func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) (bitfield.Bitlist /* included parts */, [][]kzg.Cell, [][]kzg.Proof, error) {
+func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
     // Collect KZG hashes for all blobs.
     versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
     for _, commitment := range kzgCommitments {
@@ -731,34 +732,24 @@ func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommi
         versionedHashes = append(versionedHashes, versionedHash)
     }

-    var blobAndProofs []*pb.BlobAndProofV2
-
     // Fetch all blobsAndCellsProofs from the execution client.
-    var err error
-    useV3 := s.capabilityCache.has(GetBlobsV3)
-    if useV3 {
-        // v3 can return a partial response. V2 is all or nothing
-        blobAndProofs, err = s.GetBlobsV3(ctx, versionedHashes)
-    } else {
-        blobAndProofs, err = s.GetBlobsV2(ctx, versionedHashes)
+    blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
+    if err != nil {
+        return nil, nil, errors.Wrapf(err, "get blobs V2")
     }

-    if err != nil {
-        return nil, nil, nil, errors.Wrapf(err, "get blobs V2/3")
+    // Return early if nothing is returned from the EL.
+    if len(blobAndProofV2s) == 0 {
+        return nil, nil, nil
     }

     // Compute cells and proofs from the blobs and cell proofs.
-    included, cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(kzgCommitments)), blobAndProofs)
+    cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
     if err != nil {
-        return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
+        return nil, nil, errors.Wrap(err, "compute cells and proofs")
     }
-    if included.Count() == uint64(len(kzgCommitments)) {
-        getBlobsV3CompleteResponsesTotal.Inc()
-    } else if included.Count() > 0 {
-        getBlobsV3PartialResponsesTotal.Inc()
-    }

-    return included, cellsPerBlob, proofsPerBlob, nil
+    return cellsPerBlob, proofsPerBlob, nil
 }

 // upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
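Per the engine API, engine_getClientVersionV1 is a symmetric exchange: the caller identifies itself with its own ClientVersionV1 and the execution client responds with a list describing itself. A hedged usage sketch on top of the new EngineCaller method (the logging is illustrative):

    // Sketch: surface the connected execution client's identity at startup.
    versions, err := engineCaller.GetClientVersionV1(ctx)
    if err != nil {
        log.WithError(err).Warn("Could not fetch execution client version")
    } else if len(versions) > 0 {
        v := versions[0]
        log.Infof("Connected to %s %s (commit %s)", v.Name, v.Version, v.Commit)
    }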
@@ -13,6 +13,7 @@ import (
     "strings"
     "testing"

+    "github.com/OffchainLabs/prysm/v7/api/server/structs"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -999,6 +1000,61 @@ func TestClient_HTTP(t *testing.T) {
         require.NoError(t, err)
         require.DeepEqual(t, want, resp)
     })
+    t.Run(GetClientVersionV1, func(t *testing.T) {
+        want := []*structs.ClientVersionV1{{
+            Code:    "GE",
+            Name:    "go-ethereum",
+            Version: "1.15.11-stable",
+            Commit:  "36b2371c",
+        }}
+
+        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+            w.Header().Set("Content-Type", "application/json")
+            defer func() {
+                require.NoError(t, r.Body.Close())
+            }()
+            enc, err := io.ReadAll(r.Body)
+            require.NoError(t, err)
+            jsonRequestString := string(enc)
+
+            // We expect the JSON string RPC request contains the right method name.
+            require.Equal(t, true, strings.Contains(
+                jsonRequestString, GetClientVersionV1,
+            ))
+            require.Equal(t, true, strings.Contains(
+                jsonRequestString, "\"code\":\"PM\"",
+            ))
+            require.Equal(t, true, strings.Contains(
+                jsonRequestString, "\"name\":\"Prysm\"",
+            ))
+            require.Equal(t, true, strings.Contains(
+                jsonRequestString, fmt.Sprintf("\"version\":\"%s\"", version.SemanticVersion()),
+            ))
+            require.Equal(t, true, strings.Contains(
+                jsonRequestString, fmt.Sprintf("\"commit\":\"%s\"", version.GitCommit()[:8]),
+            ))
+            resp := map[string]any{
+                "jsonrpc": "2.0",
+                "id":      1,
+                "result":  want,
+            }
+            err = json.NewEncoder(w).Encode(resp)
+            require.NoError(t, err)
+        }))
+        defer srv.Close()
+
+        rpcClient, err := rpc.DialHTTP(srv.URL)
+        require.NoError(t, err)
+        defer rpcClient.Close()
+
+        service := &Service{}
+        service.rpcClient = rpcClient
+
+        // We call the RPC method via HTTP and expect a proper result.
+        resp, err := service.GetClientVersionV1(ctx)
+        require.NoError(t, err)
+        require.DeepEqual(t, want, resp)
+    })
 }

 func TestReconstructFullBellatrixBlock(t *testing.T) {
@@ -2587,7 +2643,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
     ctx := context.Background()

     t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
-        _, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
+        _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
         require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
     })

@@ -2598,7 +2654,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
         rpcClient, client := setupRpcClientV2(t, srv.URL, client)
         defer rpcClient.Close()

-        dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
+        dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
         require.NoError(t, err)
         require.Equal(t, 0, len(dataColumns))
     })
@@ -2611,7 +2667,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
         rpcClient, client := setupRpcClientV2(t, srv.URL, client)
         defer rpcClient.Close()

-        dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
+        dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
         require.NoError(t, err)
         require.Equal(t, 128, len(dataColumns))
     })
@@ -34,25 +34,6 @@ var (
             Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
         },
     )
-    getBlobsV3RequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
-        Name: "beacon_engine_getBlobsV3_requests_total",
-        Help: "Total number of engine_getBlobsV3 requests sent",
-    })
-    getBlobsV3CompleteResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
-        Name: "beacon_engine_getBlobsV3_complete_responses_total",
-        Help: "Total number of complete engine_getBlobsV3 successful responses received",
-    })
-    getBlobsV3PartialResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
-        Name: "beacon_engine_getBlobsV3_partial_responses_total",
-        Help: "Total number of engine_getBlobsV3 partial responses received",
-    })
-    getBlobsV3Latency = promauto.NewHistogram(
-        prometheus.HistogramOpts{
-            Name:    "beacon_engine_getBlobsV3_request_duration_seconds",
-            Help:    "Duration of engine_getBlobsV3 requests in seconds",
-            Buckets: []float64{0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 4},
-        },
-    )
     errParseCount = promauto.NewCounter(prometheus.CounterOpts{
         Name: "execution_parse_error_count",
         Help: "The number of errors that occurred while parsing execution payload",
@@ -13,6 +13,7 @@ go_library(
        "//visibility:public",
    ],
    deps = [
+        "//api/server/structs:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/execution/types:go_default_library",
@@ -4,6 +4,7 @@ import (
     "context"
     "math/big"

+    "github.com/OffchainLabs/prysm/v7/api/server/structs"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
     fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
     "github.com/OffchainLabs/prysm/v7/config/params"
@@ -42,6 +43,8 @@ type EngineClient struct {
     ErrorBlobSidecars       error
     DataColumnSidecars      []blocks.VerifiedRODataColumn
     ErrorDataColumnSidecars error
+    ClientVersion           []*structs.ClientVersionV1
+    ErrorClientVersion      error
 }

 // NewPayload --
@@ -118,8 +121,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
 }

 // ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
-func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
-    return e.DataColumnSidecars, nil, e.ErrorDataColumnSidecars
+func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
+    return e.DataColumnSidecars, e.ErrorDataColumnSidecars
 }

 // GetTerminalBlockHash --
@@ -173,3 +176,8 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
         blk = parentBlk
     }
 }
+
+// GetClientVersionV1 --
+func (e *EngineClient) GetClientVersionV1(context.Context) ([]*structs.ClientVersionV1, error) {
+    return e.ClientVersion, e.ErrorClientVersion
+}
@@ -6,7 +6,6 @@ go_library(
        "doc.go",
        "errors.go",
        "forkchoice.go",
-        "last_root.go",
        "log.go",
        "metrics.go",
        "node.go",
@@ -51,7 +50,6 @@ go_test(
    srcs = [
        "ffg_update_test.go",
        "forkchoice_test.go",
-        "last_root_test.go",
        "no_vote_test.go",
        "node_test.go",
        "on_tick_test.go",
@@ -32,7 +32,6 @@ func New() *ForkChoice {
         finalizedCheckpoint:     &forkchoicetypes.Checkpoint{},
         proposerBoostRoot:       [32]byte{},
         nodeByRoot:              make(map[[fieldparams.RootLength]byte]*Node),
-        nodeByPayload:           make(map[[fieldparams.RootLength]byte]*Node),
         slashedIndices:          make(map[primitives.ValidatorIndex]bool),
         receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
     }
@@ -1,26 +0,0 @@
-package doublylinkedtree
-
-import (
-    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
-    "github.com/OffchainLabs/prysm/v7/time/slots"
-)
-
-// LastRoot returns the last canonical block root in the given epoch
-func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
-    head := f.store.headNode
-    headEpoch := slots.ToEpoch(head.slot)
-    epochEnd, err := slots.EpochEnd(epoch)
-    if err != nil {
-        return [32]byte{}
-    }
-    if headEpoch <= epoch {
-        return head.root
-    }
-    for head != nil && head.slot > epochEnd {
-        head = head.parent
-    }
-    if head == nil {
-        return [32]byte{}
-    }
-    return head.root
-}
@@ -1,38 +0,0 @@
-package doublylinkedtree
-
-import (
-    "testing"
-
-    "github.com/OffchainLabs/prysm/v7/config/params"
-    "github.com/OffchainLabs/prysm/v7/testing/require"
-)
-
-func TestLastRoot(t *testing.T) {
-    f := setup(0, 0)
-    ctx := t.Context()
-
-    st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, f.InsertNode(ctx, st, root))
-    st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, f.InsertNode(ctx, st, root))
-    st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, f.InsertNode(ctx, st, root))
-    st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, f.InsertNode(ctx, st, root))
-    st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, f.InsertNode(ctx, st, root))
-    st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, f.InsertNode(ctx, st, root))
-    headNode := f.store.nodeByRoot[[32]byte{'6'}]
-    f.store.headNode = headNode
-    require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
-    require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
-    require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
-    require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
-}
@@ -94,6 +94,5 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
         s.previousProposerBoostScore = 0
     }
     delete(s.nodeByRoot, node.root)
-    delete(s.nodeByPayload, node.payloadHash)
     return invalidRoots, nil
 }
@@ -113,7 +113,6 @@ func (s *Store) insert(ctx context.Context,
         }
     }

-    s.nodeByPayload[payloadHash] = n
     s.nodeByRoot[root] = n
     if parent == nil {
         if s.treeRootNode == nil {
@@ -122,7 +121,6 @@ func (s *Store) insert(ctx context.Context,
             s.highestReceivedNode = n
         } else {
             delete(s.nodeByRoot, root)
-            delete(s.nodeByPayload, payloadHash)
             return nil, errInvalidParentRoot
         }
     } else {
@@ -191,7 +189,6 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized

     node.children = nil
     delete(s.nodeByRoot, node.root)
-    delete(s.nodeByPayload, node.payloadHash)
     return nil
 }

@@ -273,21 +270,6 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
     return f.store.highestReceivedNode.slot
 }

-// HighestReceivedBlockDelay returns the number of slots that the highest
-// received block was late when receiving it. For example, a block was late by 12 slots,
-// then this method is expected to return 12.
-func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
-    n := f.store.highestReceivedNode
-    if n == nil {
-        return 0
-    }
-    sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
-    if err != nil {
-        return 0
-    }
-    return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
-}
-
 // ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
 func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
     count := uint64(0)
@@ -128,10 +128,9 @@ func TestStore_Insert(t *testing.T) {
 	// The new node does not have a parent.
 	treeRootNode := &Node{slot: 0, root: indexToHash(0)}
 	nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
-	nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
 	jc := &forkchoicetypes.Checkpoint{Epoch: 0}
 	fc := &forkchoicetypes.Checkpoint{Epoch: 0}
-	s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
+	s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
 	payloadHash := [32]byte{'a'}
 	ctx := t.Context()
 	_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
@@ -238,7 +237,6 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
 	s.finalizedCheckpoint.Root = indexToHash(1)
 	require.NoError(t, s.prune(t.Context()))
 	require.Equal(t, len(s.nodeByRoot), 1)
-	require.Equal(t, len(s.nodeByPayload), 1)
 }

 // This test starts with the following branching diagram
@@ -319,8 +317,6 @@ func TestStore_PruneMapsNodes(t *testing.T) {
 	s.finalizedCheckpoint.Root = indexToHash(1)
 	require.NoError(t, s.prune(t.Context()))
 	require.Equal(t, len(s.nodeByRoot), 1)
-	require.Equal(t, len(s.nodeByPayload), 1)
-
 }

 func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
@@ -339,7 +335,6 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, uint64(1), count)
 	require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
-	require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())

 	// 64
 	// Received block last epoch is 1
@@ -352,7 +347,6 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, uint64(1), count)
 	require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
-	require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())

 	// 64 65
 	// Received block last epoch is 2
@@ -365,7 +359,6 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, uint64(2), count)
 	require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
-	require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())

 	// 64 65 66
 	// Received block last epoch is 3
@@ -717,17 +710,3 @@ func TestStore_CleanupInserting(t *testing.T) {
 	require.NotNil(t, f.InsertNode(ctx, st, blk))
 	require.Equal(t, false, f.HasNode(blk.Root()))
 }
-
-func TestStore_HighestReceivedBlockDelay(t *testing.T) {
-	f := ForkChoice{
-		store: &Store{
-			genesisTime: time.Unix(0, 0),
-			highestReceivedNode: &Node{
-				slot:      10,
-				timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
-			},
-		},
-	}
-
-	require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
-}
@@ -36,7 +36,6 @@ type Store struct {
 	treeRootNode   *Node                                  // the root node of the store tree.
 	headNode       *Node                                  // last head Node
 	nodeByRoot     map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
-	nodeByPayload  map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
 	slashedIndices map[primitives.ValidatorIndex]bool     // the list of equivocating validator indices
 	originRoot     [fieldparams.RootLength]byte           // The genesis block root
 	genesisTime    time.Time
@@ -67,13 +67,11 @@ type FastGetter interface {
 	HasNode([32]byte) bool
 	HighestReceivedBlockSlot() primitives.Slot
 	HighestReceivedBlockRoot() [32]byte
-	HighestReceivedBlockDelay() primitives.Slot
 	IsCanonical(root [32]byte) bool
 	IsOptimistic(root [32]byte) (bool, error)
 	IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
 	JustifiedCheckpoint() *forkchoicetypes.Checkpoint
 	JustifiedPayloadBlockHash() [32]byte
-	LastRoot(primitives.Epoch) [32]byte
 	NodeCount() int
 	PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
 	ProposerBoost() [fieldparams.RootLength]byte
@@ -121,13 +121,6 @@ func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
 	return ro.getter.HighestReceivedBlockRoot()
 }

-// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
-func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
-	ro.l.RLock()
-	defer ro.l.RUnlock()
-	return ro.getter.HighestReceivedBlockDelay()
-}
-
 // ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
 func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
 	ro.l.RLock()
@@ -163,13 +156,6 @@ func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
 	return ro.getter.Slot(root)
 }

-// LastRoot delegates to the underlying forkchoice call, under a lock.
-func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
-	ro.l.RLock()
-	defer ro.l.RUnlock()
-	return ro.getter.LastRoot(e)
-}
-
 // DependentRoot delegates to the underlying forkchoice call, under a lock.
 func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
 	ro.l.RLock()
@@ -30,7 +30,6 @@ const (
 	nodeCountCalled
 	highestReceivedBlockSlotCalled
 	highestReceivedBlockRootCalled
-	highestReceivedBlockDelayCalled
 	receivedBlocksLastEpochCalled
 	weightCalled
 	isOptimisticCalled
@@ -118,11 +117,6 @@ func TestROLocking(t *testing.T) {
 			call: highestReceivedBlockSlotCalled,
 			cb:   func(g FastGetter) { g.HighestReceivedBlockSlot() },
 		},
-		{
-			name: "highestReceivedBlockDelayCalled",
-			call: highestReceivedBlockDelayCalled,
-			cb:   func(g FastGetter) { g.HighestReceivedBlockDelay() },
-		},
 		{
 			name: "receivedBlocksLastEpochCalled",
 			call: receivedBlocksLastEpochCalled,
@@ -148,11 +142,6 @@ func TestROLocking(t *testing.T) {
 			call: slotCalled,
 			cb:   func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
 		},
-		{
-			name: "lastRootCalled",
-			call: lastRootCalled,
-			cb:   func(g FastGetter) { g.LastRoot(0) },
-		},
 		{
 			name: "targetRootForEpochCalled",
 			call: targetRootForEpochCalled,
@@ -265,11 +254,6 @@ func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
 	return [32]byte{}
 }

-func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
-	ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
-	return 0
-}
-
 func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
 	ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
 	return 0, nil
@@ -295,11 +279,6 @@ func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
 	return 0, nil
 }

-func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
-	ro.calls = append(ro.calls, lastRootCalled)
-	return [32]byte{}
-}
-
 // DependentRoot implements FastGetter.
 func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
 	ro.calls = append(ro.calls, dependentRootCalled)
@@ -678,7 +678,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
 		DB:                 b.db,
 		StateGen:           b.stateGen,
 		ClockWaiter:        b.ClockWaiter,
-		PartialDataColumns: b.cliCtx.Bool(flags.PartialDataColumns.Name),
 	})
 	if err != nil {
 		return err
@@ -52,7 +52,6 @@ go_library(
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/kv:go_default_library",
         "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
         "//beacon-chain/p2p/peers/peerdata:go_default_library",
         "//beacon-chain/p2p/peers/scorers:go_default_library",
@@ -343,7 +343,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
 // there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
 // This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
 // cancelled (whichever comes first).
-func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error {
+func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
 	// Increase the number of broadcast attempts.
 	dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))

@@ -353,15 +353,16 @@ func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []bl
 		return errors.Wrap(err, "current fork digest")
 	}

-	go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars, partialColumns)
+	go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)

 	return nil
 }

-// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
-// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
-// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
-func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) {
+// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network.
+// For sidecars with available peers, it uses batch publishing.
+// For sidecars without peers, it finds peers first and then publishes individually.
+// Both paths run in parallel. It returns when all broadcasts are complete, or the context is cancelled.
+func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
 	type rootAndIndex struct {
 		root  [fieldparams.RootLength]byte
 		index uint64
@@ -371,8 +372,8 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
 	logLevel := logrus.GetLevel()
 	slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)

-	topicFunc := func(dcIndex uint64) (topic string, wrappedSubIdx uint64, subnet uint64) {
-		subnet = peerdas.ComputeSubnetForDataColumnSidecar(dcIndex)
+	topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
+		subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
 		topic = dataColumnSubnetToTopic(subnet, forkDigest)
 		wrappedSubIdx = subnet + dataColumnSubnetVal
 		return
@@ -385,7 +386,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
 	for _, sidecar := range sidecars {
 		slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()

-		topic, wrappedSubIdx, _ := topicFunc(sidecar.Index)
+		topic, wrappedSubIdx, _ := topicFunc(sidecar)
 		// Check if we have a peer for this subnet (use RLock for read-only check).
 		mu := s.subnetLocker(wrappedSubIdx)
 		mu.RLock()
@@ -410,7 +411,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
 			ctx := trace.NewContext(s.ctx, span)
 			defer span.End()

-			topic, _, _ := topicFunc(sidecar.Index)
+			topic, _, _ := topicFunc(sidecar)

 			if err := s.batchObject(ctx, &messageBatch, sidecar, topic); err != nil {
 				tracing.AnnotateError(span, err)
@@ -418,10 +419,6 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
 				return
 			}

-			// Increase the number of successful broadcasts.
-			dataColumnSidecarBroadcasts.Inc()
-
-			// Record the timing for log purposes.
 			if logLevel >= logrus.DebugLevel {
 				root := sidecar.BlockRoot()
 				timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
@@ -436,7 +433,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
 			ctx := trace.NewContext(s.ctx, span)
 			defer span.End()

-			topic, wrappedSubIdx, subnet := topicFunc(sidecar.Index)
+			topic, wrappedSubIdx, subnet := topicFunc(sidecar)

 			// Find peers for this sidecar's subnet.
 			if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
@@ -461,32 +458,6 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
 		})
 	}

-	if s.partialColumnBroadcaster != nil {
-		// Note: There is not batch publish for partial columns.
-		for _, partialColumn := range partialColumns {
-			individualWg.Go(func() {
-				_, span := trace.StartSpan(ctx, "p2p.broadcastPartialDataColumn")
-				ctx := trace.NewContext(s.ctx, span)
-				defer span.End()
-
-				topic, wrappedSubIdx, subnet := topicFunc(partialColumn.Index)
-
-				// Find peers for this sidecar's subnet.
-				if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
-					tracing.AnnotateError(span, err)
-					log.WithError(err).Error("Cannot find peers if needed")
-					return
-				}
-
-				fullTopicStr := topic + s.Encoding().ProtocolSuffix()
-				if err := s.partialColumnBroadcaster.Publish(fullTopicStr, partialColumn); err != nil {
-					tracing.AnnotateError(span, err)
-					log.WithError(err).Error("Cannot partial broadcast data column sidecar")
-				}
-			})
-		}
-	}
-
 	// Wait for batch to be populated, then publish.
 	batchWg.Wait()
 	if len(sidecarsWithPeers) > 0 {
@@ -803,7 +803,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
 	}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")

 	// Broadcast to peers and wait.
-	err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar}, nil)
+	err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
 	require.NoError(t, err)

 	// Receive the message.
@@ -969,7 +969,7 @@ func TestService_BroadcastDataColumnRoundRobin(t *testing.T) {
 	time.Sleep(100 * time.Millisecond)

 	// Broadcast all sidecars.
-	err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars, nil)
+	err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars)
 	require.NoError(t, err)
 	// Give some time for messages to be sent.
 	time.Sleep(100 * time.Millisecond)
@@ -26,7 +26,6 @@ const (
 // Config for the p2p service. These parameters are set from application level flags
 // to initialize the p2p service.
 type Config struct {
-	PartialDataColumns bool
 	NoDiscovery        bool
 	EnableUPnP         bool
 	StaticPeerID       bool
@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -29,7 +28,6 @@ type (
 		Broadcaster
 		SetStreamHandler
 		PubSubProvider
-		PartialColumnBroadcasterProvider
 		PubSubTopicUser
 		SenderEncoder
 		PeerManager
@@ -54,7 +52,7 @@ type (
 		BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
 		BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
 		BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
-		BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error
+		BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
 	}

 	// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -94,11 +92,6 @@ type (
 		PubSub() *pubsub.PubSub
 	}

-	// PubSubProvider provides the p2p pubsub protocol.
-	PartialColumnBroadcasterProvider interface {
-		PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster
-	}
-
 	// PeerManager abstracts some peer management methods from libp2p.
 	PeerManager interface {
 		Disconnect(peer.ID) error
@@ -157,11 +157,6 @@ var (
 			Help: "The number of publish messages received via rpc for a particular topic",
 		},
 		[]string{"topic"})
-	pubsubRPCPubRecvSize = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "p2p_pubsub_rpc_recv_pub_size_total",
-		Help: "The total size of publish messages received via rpc for a particular topic",
-	},
-		[]string{"topic", "is_partial"})
 	pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
 		Name: "p2p_pubsub_rpc_drop_total",
 		Help: "The number of messages dropped via rpc for a particular control message",
@@ -176,11 +171,6 @@ var (
 			Help: "The number of publish messages dropped via rpc for a particular topic",
 		},
 		[]string{"topic"})
-	pubsubRPCPubDropSize = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "p2p_pubsub_rpc_drop_pub_size_total",
-		Help: "The total size of publish messages dropped via rpc for a particular topic",
-	},
-		[]string{"topic", "is_partial"})
 	pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
 		Name: "p2p_pubsub_rpc_sent_total",
 		Help: "The number of messages sent via rpc for a particular control message",
@@ -195,16 +185,6 @@ var (
 			Help: "The number of publish messages sent via rpc for a particular topic",
 		},
 		[]string{"topic"})
-	pubsubRPCPubSentSize = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "gossipsub_pubsub_rpc_sent_pub_size_total",
-		Help: "The total size of publish messages sent via rpc for a particular topic",
-	},
-		[]string{"topic", "is_partial"})
-	pubsubMeshPeers = promauto.NewGaugeVec(prometheus.GaugeOpts{
-		Name: "gossipsub_mesh_peers",
-		Help: "The number of capable peers in mesh",
-	},
-		[]string{"topic", "supports_partial"})
 )

 func (s *Service) updateMetrics() {
@@ -1,28 +0,0 @@
-load("@prysm//tools/go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = [
-        "log.go",
-        "metrics.go",
-        "partial.go",
-    ],
-    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//config/params:go_default_library",
-        "//consensus-types/blocks:go_default_library",
-        "//internal/logrusadapter:go_default_library",
-        "//proto/prysm/v1alpha1:go_default_library",
-        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//partialmessages/bitmap:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
-        "@com_github_pkg_errors//:go_default_library",
-        "@com_github_prometheus_client_golang//prometheus:go_default_library",
-        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
-        "@com_github_sirupsen_logrus//:go_default_library",
-    ],
-)
@@ -1,25 +0,0 @@
-load("@prysm//tools/go:def.bzl", "go_test")
-
-go_test(
-    name = "go_default_test",
-    size = "medium",
-    srcs = ["two_node_test.go"],
-    deps = [
-        "//beacon-chain/core/peerdas:go_default_library",
-        "//beacon-chain/p2p:go_default_library",
-        "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
-        "//config/fieldparams:go_default_library",
-        "//config/params:go_default_library",
-        "//consensus-types/blocks:go_default_library",
-        "//proto/prysm/v1alpha1:go_default_library",
-        "//testing/assert:go_default_library",
-        "//testing/require:go_default_library",
-        "//testing/util:go_default_library",
-        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
-        "@com_github_libp2p_go_libp2p//x/simlibp2p:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
-        "@com_github_marcopolo_simnet//:go_default_library",
-        "@com_github_sirupsen_logrus//:go_default_library",
-    ],
-)
@@ -1,256 +0,0 @@
-package integrationtest
-
-import (
-	"context"
-	"crypto/rand"
-	"fmt"
-	"testing"
-	"testing/synctest"
-	"time"
-
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
-	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
-	"github.com/OffchainLabs/prysm/v7/config/params"
-	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
-	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
-	"github.com/OffchainLabs/prysm/v7/testing/assert"
-	"github.com/OffchainLabs/prysm/v7/testing/require"
-	"github.com/OffchainLabs/prysm/v7/testing/util"
-	pubsub "github.com/libp2p/go-libp2p-pubsub"
-	"github.com/libp2p/go-libp2p/core/peer"
-	simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p"
-	"github.com/marcopolo/simnet"
-	"github.com/sirupsen/logrus"
-)
-
-// TestTwoNodePartialColumnExchange tests that two nodes can exchange partial columns
-// and reconstruct the complete column. Node 1 has cells 0-2, Node 2 has cells 3-5.
-// After exchange, both should have all cells.
-func TestTwoNodePartialColumnExchange(t *testing.T) {
-	synctest.Test(t, func(t *testing.T) {
-		// Create a simulated libp2p network
-		latency := time.Millisecond * 10
-		network, meta, err := simlibp2p.SimpleLibp2pNetwork([]simlibp2p.NodeLinkSettingsAndCount{
-			{LinkSettings: simnet.NodeBiDiLinkSettings{
-				Downlink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
-				Uplink:   simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
-			}, Count: 2},
-		}, simlibp2p.NetworkSettings{UseBlankHost: true})
-		require.NoError(t, err)
-		require.NoError(t, network.Start())
-		defer func() {
-			require.NoError(t, network.Close())
-		}()
-		defer func() {
-			for _, node := range meta.Nodes {
-				err := node.Close()
-				if err != nil {
-					panic(err)
-				}
-			}
-		}()
-
-		h1 := meta.Nodes[0]
-		h2 := meta.Nodes[1]
-
-		logger := logrus.New()
-		logger.SetLevel(logrus.DebugLevel)
-		broadcaster1 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
-		broadcaster2 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
-
-		opts1 := broadcaster1.AppendPubSubOpts([]pubsub.Option{
-			pubsub.WithMessageSigning(false),
-			pubsub.WithStrictSignatureVerification(false),
-		})
-		opts2 := broadcaster2.AppendPubSubOpts([]pubsub.Option{
-			pubsub.WithMessageSigning(false),
-			pubsub.WithStrictSignatureVerification(false),
-		})
-
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
-		ps1, err := pubsub.NewGossipSub(ctx, h1, opts1...)
-		require.NoError(t, err)
-		ps2, err := pubsub.NewGossipSub(ctx, h2, opts2...)
-		require.NoError(t, err)
-
-		defer func() {
-			broadcaster1.Stop()
-			broadcaster2.Stop()
-		}()
-
-		// Generate Test Data
-		var blockRoot [fieldparams.RootLength]byte
-		copy(blockRoot[:], []byte("test-block-root"))
-
-		numCells := 6
-		commitments := make([][]byte, numCells)
-		cells := make([][]byte, numCells)
-		proofs := make([][]byte, numCells)
-
-		for i := range numCells {
-			commitments[i] = make([]byte, 48)
-
-			cells[i] = make([]byte, 2048)
-			_, err := rand.Read(cells[i])
-			require.NoError(t, err)
-			proofs[i] = make([]byte, 48)
-			_ = fmt.Appendf(proofs[i][:0], "proof %d", i)
-		}
-
-		roDC, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
-			{
-				BodyRoot:       blockRoot[:],
-				KzgCommitments: commitments,
-				Column:         cells,
-				KzgProofs:      proofs,
-			},
-		})
-
-		pc1, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
-		require.NoError(t, err)
-		pc2, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
-		require.NoError(t, err)
-
-		// Split data
-		for i := range numCells {
-			if i%2 == 0 {
-				pc1.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
-			} else {
-				pc2.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
-			}
-		}
-
-		// Setup Topic and Subscriptions
-		digest := params.ForkDigest(0)
-		columnIndex := uint64(12)
-		subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
-		topicStr := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) +
-			encoder.SszNetworkEncoder{}.ProtocolSuffix()
-
-		time.Sleep(100 * time.Millisecond)
-
-		topic1, err := ps1.Join(topicStr, pubsub.RequestPartialMessages())
-		require.NoError(t, err)
-		topic2, err := ps2.Join(topicStr, pubsub.RequestPartialMessages())
-		require.NoError(t, err)
-
-		// Header validator
-		headerValidator := func(header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
-			if header == nil {
-				return false, fmt.Errorf("nil header")
-			}
-			if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
-				return true, fmt.Errorf("nil signed block header")
-			}
-			if len(header.KzgCommitments) == 0 {
-				return true, fmt.Errorf("empty kzg commitments")
-			}
-			// Verify inclusion proof
-			if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
-				return true, fmt.Errorf("invalid inclusion proof: %w", err)
-			}
-			t.Log("Header validation passed")
-			return false, nil
-		}
-
-		cellValidator := func(_ []blocks.CellProofBundle) error {
-			return nil
-		}
-
-		node1Complete := make(chan blocks.VerifiedRODataColumn, 1)
-		node2Complete := make(chan blocks.VerifiedRODataColumn, 1)
-
-		handler1 := func(topic string, col blocks.VerifiedRODataColumn) {
-			t.Logf("Node 1: Completed! Column has %d cells", len(col.Column))
-			node1Complete <- col
-		}
-
-		handler2 := func(topic string, col blocks.VerifiedRODataColumn) {
-			t.Logf("Node 2: Completed! Column has %d cells", len(col.Column))
-			node2Complete <- col
-		}
-
-		// Connect hosts
-		err = h1.Connect(context.Background(), peer.AddrInfo{
-			ID:    h2.ID(),
-			Addrs: h2.Addrs(),
-		})
-		require.NoError(t, err)
-		time.Sleep(300 * time.Millisecond)
-
-		// Subscribe to regular GossipSub (critical for partial message RPC exchange!)
-		sub1, err := topic1.Subscribe()
-		require.NoError(t, err)
-		defer sub1.Cancel()
-
-		sub2, err := topic2.Subscribe()
-		require.NoError(t, err)
-		defer sub2.Cancel()
-
-		noopHeaderHandler := func(header *ethpb.PartialDataColumnHeader) chan bool {
-			ch := make(chan bool)
-			close(ch)
-			return ch
-		}
-
-		broadcaster1.ValidateHeader = headerValidator
-		broadcaster1.ValidateColumn = cellValidator
-		broadcaster1.HandleColumn = handler1
-		broadcaster1.HandleHeader = noopHeaderHandler
-
-		broadcaster2.ValidateHeader = headerValidator
-		broadcaster2.ValidateColumn = cellValidator
-		broadcaster2.HandleColumn = handler2
-		broadcaster2.HandleHeader = noopHeaderHandler
-
-		go broadcaster1.Start()
-		go broadcaster2.Start()
-
-		err = broadcaster1.Subscribe(topic1)
-		require.NoError(t, err)
-		err = broadcaster2.Subscribe(topic2)
-		require.NoError(t, err)
-
-		// Wait for mesh to form
-		time.Sleep(2 * time.Second)
-
-		// Publish
-		t.Log("Publishing from Node 1")
-		err = broadcaster1.Publish(topicStr, pc1)
-		require.NoError(t, err)
-
-		time.Sleep(200 * time.Millisecond)
-
-		t.Log("Publishing from Node 2")
-		err = broadcaster2.Publish(topicStr, pc2)
-		require.NoError(t, err)
-
-		// Wait for Completion
-		timeout := time.After(10 * time.Second)
-		var col1, col2 blocks.VerifiedRODataColumn
-		receivedCount := 0
-
-		for receivedCount < 2 {
-			select {
-			case col1 = <-node1Complete:
-				t.Log("Node 1 completed reconstruction")
-				receivedCount++
-			case col2 = <-node2Complete:
-				t.Log("Node 2 completed reconstruction")
-				receivedCount++
-			case <-timeout:
-				t.Fatalf("Timeout: Only %d/2 nodes completed", receivedCount)
-			}
-		}
-
-		// Verify both columns have all cells
-		assert.Equal(t, numCells, len(col1.Column), "Node 1 should have all cells")
-		assert.Equal(t, numCells, len(col2.Column), "Node 2 should have all cells")
-		assert.DeepSSZEqual(t, cells, col1.Column, "Node 1 cell mismatch")
-		assert.DeepSSZEqual(t, cells, col2.Column, "Node 2 cell mismatch")
-	})
-}
@@ -1,9 +0,0 @@
-// Code generated by hack/gen-logs.sh; DO NOT EDIT.
-// This file is created and regenerated automatically. Anything added here might get removed.
-package partialdatacolumnbroadcaster
-
-import "github.com/sirupsen/logrus"
-
-// The prefix for logs from this package will be the text after the last slash in the package path.
-// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
-var log = logrus.WithField("package", "beacon-chain/p2p/partialdatacolumnbroadcaster")
@@ -1,18 +0,0 @@
-package partialdatacolumnbroadcaster
-
-import (
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-var (
-	partialMessageUsefulCellsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "beacon_partial_message_useful_cells_total",
-		Help: "Number of useful cells received via a partial message",
-	}, []string{"column_index"})
-
-	partialMessageCellsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "beacon_partial_message_cells_received_total",
-		Help: "Number of total cells received via a partial message",
-	}, []string{"column_index"})
-)
@@ -1,577 +0,0 @@
|
|||||||
package partialdatacolumnbroadcaster
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"log/slog"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/OffchainLabs/go-bitfield"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
|
||||||
"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
|
|
||||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
|
||||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
|
||||||
"github.com/libp2p/go-libp2p-pubsub/partialmessages"
|
|
||||||
"github.com/libp2p/go-libp2p-pubsub/partialmessages/bitmap"
|
|
||||||
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODOs:
|
|
||||||
// different eager push strategies:
|
|
||||||
// - no eager push
|
|
||||||
// - full column eager push
|
|
||||||
// - With debouncing - some factor of RTT
|
|
||||||
// - eager push missing cells
|
|
||||||
|
|
||||||
const TTLInSlots = 3
|
|
||||||
const maxConcurrentValidators = 128
|
|
||||||
const headerHandledTimeout = time.Second * 1
|
|
||||||
|
|
||||||
var dataColumnTopicRegex = regexp.MustCompile(`data_column_sidecar_(\d+)`)
|
|
||||||
|
|
||||||
func extractColumnIndexFromTopic(topic string) (uint64, error) {
|
|
||||||
matches := dataColumnTopicRegex.FindStringSubmatch(topic)
|
|
||||||
if len(matches) < 2 {
|
|
||||||
return 0, errors.New("could not extract column index from topic")
|
|
||||||
}
|
|
||||||
return strconv.ParseUint(matches[1], 10, 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeaderValidator validates a PartialDataColumnHeader.
|
|
||||||
// Returns (reject, err) where:
|
|
||||||
// - reject=true, err!=nil: REJECT - peer should be penalized
|
|
||||||
// - reject=false, err!=nil: IGNORE - don't penalize, just ignore
|
|
||||||
// - reject=false, err=nil: valid header
|
|
||||||
type HeaderValidator func(header *ethpb.PartialDataColumnHeader) (reject bool, err error)
|
|
||||||
type HeaderHandler func(header *ethpb.PartialDataColumnHeader) chan bool
|
|
||||||
type ColumnValidator func(cells []blocks.CellProofBundle) error
|
|
||||||
|
|
||||||
type PartialColumnBroadcaster struct {
|
|
||||||
logger *logrus.Logger
|
|
||||||
|
|
||||||
ps *pubsub.PubSub
|
|
||||||
stop chan struct{}
|
|
||||||
|
|
||||||
ValidateHeader HeaderValidator
|
|
||||||
ValidateColumn ColumnValidator
|
|
||||||
HandleColumn SubHandler
|
|
||||||
HandleHeader HeaderHandler
|
|
||||||
|
|
||||||
headerHandled map[string]chan bool
|
|
||||||
|
|
||||||
// map topic -> *pubsub.Topic
|
|
||||||
topics map[string]*pubsub.Topic
|
|
||||||
|
|
||||||
concurrentValidatorSemaphore chan struct{}
|
|
||||||
|
|
||||||
// map topic -> map[groupID]PartialColumn
|
|
||||||
partialMsgStore map[string]map[string]*blocks.PartialDataColumn
|
|
||||||
|
|
||||||
groupTTL map[string]int8
|
|
||||||
|
|
||||||
// validHeaderCache caches validated headers by group ID (works across topics)
|
|
||||||
validHeaderCache map[string]*ethpb.PartialDataColumnHeader
|
|
||||||
|
|
||||||
incomingReq chan request
|
|
||||||
}
|
|
||||||
|
|
||||||
type requestKind uint8
|
|
||||||
|
|
||||||
const (
|
|
||||||
requestKindPublish requestKind = iota
|
|
||||||
requestKindSubscribe
|
|
||||||
requestKindUnsubscribe
|
|
||||||
requestKindHandleIncomingRPC
|
|
||||||
requestKindCellsValidated
|
|
||||||
)
|
|
||||||
|
|
||||||
type request struct {
|
|
||||||
kind requestKind
|
|
||||||
response chan error
|
|
||||||
sub subscribe
|
|
||||||
unsub unsubscribe
|
|
||||||
publish publish
|
|
||||||
incomingRPC rpcWithFrom
|
|
||||||
cellsValidated *cellsValidated
|
|
||||||
}
|
|
||||||
|
|
||||||
type publish struct {
|
|
||||||
topic string
|
|
||||||
c blocks.PartialDataColumn
|
|
||||||
}
|
|
||||||
|
|
||||||
type subscribe struct {
|
|
||||||
t *pubsub.Topic
|
|
||||||
}
|
|
||||||
|
|
||||||
type unsubscribe struct {
|
|
||||||
topic string
|
|
||||||
}
|
|
||||||
|
|
||||||
type rpcWithFrom struct {
|
|
||||||
*pubsub_pb.PartialMessagesExtension
|
|
||||||
from peer.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
type cellsValidated struct {
|
|
||||||
validationTook time.Duration
|
|
||||||
topic string
|
|
||||||
group []byte
|
|
||||||
cellIndices []uint64
|
|
||||||
cells []blocks.CellProofBundle
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewBroadcaster(logger *logrus.Logger) *PartialColumnBroadcaster {
|
|
||||||
return &PartialColumnBroadcaster{
|
|
||||||
topics: make(map[string]*pubsub.Topic),
|
|
||||||
partialMsgStore: make(map[string]map[string]*blocks.PartialDataColumn),
|
|
||||||
groupTTL: make(map[string]int8),
|
|
||||||
validHeaderCache: make(map[string]*ethpb.PartialDataColumnHeader),
|
|
||||||
headerHandled: make(map[string]chan bool),
|
|
||||||
// GossipSub sends the messages to this channel. The buffer should be
|
|
||||||
// big enough to avoid dropping messages. We don't want to block the gossipsub event loop for this.
|
|
||||||
incomingReq: make(chan request, 128*16),
|
|
||||||
logger: logger,
|
|
||||||
|
|
||||||
concurrentValidatorSemaphore: make(chan struct{}, maxConcurrentValidators),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendPubSubOpts adds the necessary pubsub options to enable partial messages.
|
|
||||||
func (p *PartialColumnBroadcaster) AppendPubSubOpts(opts []pubsub.Option) []pubsub.Option {
|
|
||||||
slogger := slog.New(logrusadapter.Handler{Logger: p.logger})
|
|
||||||
opts = append(opts,
|
|
||||||
pubsub.WithPartialMessagesExtension(&partialmessages.PartialMessagesExtension{
|
|
||||||
Logger: slogger,
|
|
||||||
MergePartsMetadata: func(topic string, left, right partialmessages.PartsMetadata) partialmessages.PartsMetadata {
|
|
||||||
if len(left) == 0 {
|
|
||||||
return right
|
|
||||||
}
|
|
||||||
merged, err := bitfield.Bitlist(left).Or(bitfield.Bitlist(right))
|
|
||||||
if err != nil {
|
|
||||||
p.logger.Warn("Failed to merge bitfields", "err", err, "left", left, "right", right)
|
|
||||||
return left
|
|
||||||
}
|
|
||||||
return partialmessages.PartsMetadata(merged)
|
|
||||||
},
|
|
||||||
ValidateRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
|
|
||||||
// TODO. Add some basic and fast sanity checks
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
OnIncomingRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
|
|
||||||
select {
|
|
||||||
case p.incomingReq <- request{
|
|
||||||
kind: requestKindHandleIncomingRPC,
|
|
||||||
incomingRPC: rpcWithFrom{rpc, from},
|
|
||||||
}:
|
|
||||||
default:
|
|
||||||
p.logger.Warn("Dropping incoming partial RPC", "rpc", rpc)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}),
|
|
||||||
func(ps *pubsub.PubSub) error {
|
|
||||||
p.ps = ps
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
)
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start starts the event loop of the PartialColumnBroadcaster. Should be called
|
|
||||||
// within a goroutine (go p.Start())
|
|
||||||
func (p *PartialColumnBroadcaster) Start() {
|
|
||||||
if p.ValidateHeader == nil {
|
|
||||||
p.logger.Error("No header validator registered")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if p.HandleHeader == nil {
|
|
||||||
p.logger.Error("No header handler registered")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if p.ValidateColumn == nil {
|
|
||||||
p.logger.Error("No column validator registered")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.HandleColumn == nil {
|
|
||||||
p.logger.Error("No column handler registered")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
p.stop = make(chan struct{})
|
|
||||||
p.loop()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *PartialColumnBroadcaster) loop() {
|
|
||||||
cleanup := time.NewTicker(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot))
|
|
||||||
defer cleanup.Stop()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-p.stop:
|
|
||||||
return
|
|
||||||
case <-cleanup.C:
|
|
||||||
for groupID, ttl := range p.groupTTL {
|
|
||||||
if ttl > 0 {
|
|
||||||
p.groupTTL[groupID] = ttl - 1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(p.groupTTL, groupID)
|
|
||||||
delete(p.validHeaderCache, groupID)
|
|
||||||
delete(p.headerHandled, groupID)
|
|
||||||
for topic, msgStore := range p.partialMsgStore {
|
|
||||||
delete(msgStore, groupID)
|
|
||||||
if len(msgStore) == 0 {
|
|
||||||
delete(p.partialMsgStore, topic)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case req := <-p.incomingReq:
|
|
||||||
switch req.kind {
|
|
||||||
case requestKindPublish:
|
|
||||||
req.response <- p.publish(req.publish.topic, req.publish.c)
|
|
||||||
case requestKindSubscribe:
|
|
||||||
req.response <- p.subscribe(req.sub.t)
|
|
||||||
case requestKindUnsubscribe:
|
|
||||||
req.response <- p.unsubscribe(req.unsub.topic)
|
|
||||||
case requestKindHandleIncomingRPC:
|
|
||||||
err := p.handleIncomingRPC(req.incomingRPC)
|
|
||||||
if err != nil {
|
|
||||||
p.logger.Error("Failed to handle incoming partial RPC", "err", err)
|
|
||||||
}
|
|
||||||
case requestKindCellsValidated:
|
|
||||||
err := p.handleCellsValidated(req.cellsValidated)
|
|
||||||
if err != nil {
|
|
||||||
p.logger.Error("Failed to handle cells validated", "err", err)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
p.logger.Error("Unknown request kind", "kind", req.kind)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *PartialColumnBroadcaster) getDataColumn(topic string, group []byte) *blocks.PartialDataColumn {
|
|
||||||
topicStore, ok := p.partialMsgStore[topic]
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
msg, ok := topicStore[string(group)]
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *PartialColumnBroadcaster) handleIncomingRPC(rpcWithFrom rpcWithFrom) error {
	if p.ps == nil {
		return errors.New("pubsub not initialized")
	}

	hasMessage := len(rpcWithFrom.PartialMessage) > 0

	var message ethpb.PartialDataColumnSidecar
	if hasMessage {
		err := message.UnmarshalSSZ(rpcWithFrom.PartialMessage)
		if err != nil {
			return errors.Wrap(err, "failed to unmarshal partial message data")
		}
	}

	topicID := rpcWithFrom.GetTopicID()
	groupID := rpcWithFrom.GroupID
	ourDataColumn := p.getDataColumn(topicID, groupID)
	var shouldRepublish bool

	if ourDataColumn == nil && hasMessage {
		var header *ethpb.PartialDataColumnHeader
		// Check the cache first for this group.
		if cachedHeader, ok := p.validHeaderCache[string(groupID)]; ok {
			header = cachedHeader
		} else {
			// We haven't seen this group before. Check if we have a valid header.
			if len(message.Header) == 0 {
				p.logger.Debug("No partial column found and no header in message, ignoring")
				return nil
			}

			header = message.Header[0]
			reject, err := p.ValidateHeader(header)
			if err != nil {
				p.logger.Debug("Header validation failed", "err", err, "reject", reject)
				if reject {
					// REJECT case: penalize the peer.
					_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
				}
				// Both REJECT and IGNORE: don't process further.
				return nil
			}
			// Cache the valid header.
			p.validHeaderCache[string(groupID)] = header
			handledCh := p.HandleHeader(header)
			p.headerHandled[string(groupID)] = handledCh
		}

		columnIndex, err := extractColumnIndexFromTopic(topicID)
		if err != nil {
			return err
		}

		newColumn, err := blocks.NewPartialDataColumn(
			header.SignedBlockHeader,
			columnIndex,
			header.KzgCommitments,
			header.KzgCommitmentsInclusionProof,
		)
		if err != nil {
			p.logger.WithError(err).WithFields(logrus.Fields{
				"topic":          topicID,
				"columnIndex":    columnIndex,
				"numCommitments": len(header.KzgCommitments),
			}).Error("Failed to create partial data column from header")
			return err
		}

		// Save to the store.
		topicStore, ok := p.partialMsgStore[topicID]
		if !ok {
			topicStore = make(map[string]*blocks.PartialDataColumn)
			p.partialMsgStore[topicID] = topicStore
		}
		topicStore[string(newColumn.GroupID())] = &newColumn
		p.groupTTL[string(newColumn.GroupID())] = TTLInSlots

		ourDataColumn = &newColumn
		shouldRepublish = true
	}

	if ourDataColumn == nil {
		// We don't have a partial column for this. Can happen if we got cells
		// without a header.
		return nil
	}

	logger := p.logger.WithFields(logrus.Fields{
		"from":  rpcWithFrom.from,
		"topic": topicID,
		"group": groupID,
	})

	if len(rpcWithFrom.PartialMessage) > 0 {
		// TODO: is there any penalty we want to consider for giving us data we didn't request?
		// Note that we need to be careful around race conditions and eager data.
		// Also note that protobufs by design allow extra data that we don't parse.
		// Marco's take: no, we don't need to do anything else here.
		cellIndices, cellsToVerify, err := ourDataColumn.CellsToVerifyFromPartialMessage(&message)
		if err != nil {
			return err
		}
		// Track cells received via partial message.
		if len(cellIndices) > 0 {
			columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
			partialMessageCellsReceivedTotal.WithLabelValues(columnIndexStr).Add(float64(len(cellIndices)))
		}
		if len(cellsToVerify) > 0 {
			p.concurrentValidatorSemaphore <- struct{}{}
			go func() {
				defer func() {
					<-p.concurrentValidatorSemaphore
				}()
				start := time.Now()
				err := p.ValidateColumn(cellsToVerify)
				if err != nil {
					logger.Error("failed to validate cells", "err", err)
					_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
					return
				}
				_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackUsefulMessage)
				p.incomingReq <- request{
					kind: requestKindCellsValidated,
					cellsValidated: &cellsValidated{
						validationTook: time.Since(start),
						topic:          topicID,
						group:          groupID,
						cells:          cellsToVerify,
						cellIndices:    cellIndices,
					},
				}
			}()
		}
	}

	peerHas := bitmap.Bitmap(rpcWithFrom.PartsMetadata)
	iHave := bitmap.Bitmap(ourDataColumn.PartsMetadata())
	if !shouldRepublish && len(peerHas) > 0 && !bytes.Equal(peerHas, iHave) {
		// Either we have something they don't, or vice versa.
		shouldRepublish = true
		logger.Debug("republishing due to parts metadata difference")
	}

	headerHandled := p.headerHandled[string(groupID)]
	if headerHandled != nil {
		select {
		case <-headerHandled:
		case <-time.After(headerHandledTimeout):
			return errors.New("header not handled in time")
		}
	}

	if shouldRepublish {
		err := p.ps.PublishPartialMessage(topicID, ourDataColumn, partialmessages.PublishOptions{})
		if err != nil {
			return err
		}
	}
	return nil
}

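handleIncomingRPC pushes KZG verification onto worker goroutines but caps how many run at once by treating the buffered concurrentValidatorSemaphore channel as a counting semaphore: send to acquire a slot, receive to release it. A standalone sketch of that idiom, with an illustrative capacity and workload:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// A buffered channel as a counting semaphore: sending acquires a slot,
	// receiving releases it. Capacity 2 means at most two jobs in flight.
	sem := make(chan struct{}, 2)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		sem <- struct{}{} // blocks while both slots are taken
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			defer func() { <-sem }()          // release the slot
			time.Sleep(10 * time.Millisecond) // stand-in for cell verification
			fmt.Println("verified batch", n)
		}(i)
	}
	wg.Wait()
}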
func (p *PartialColumnBroadcaster) handleCellsValidated(cells *cellsValidated) error {
	ourDataColumn := p.getDataColumn(cells.topic, cells.group)
	if ourDataColumn == nil {
		return errors.New("data column not found for verified cells")
	}
	extended := ourDataColumn.ExtendFromVerfifiedCells(cells.cellIndices, cells.cells)
	p.logger.Debug("Extended partial message", "duration", cells.validationTook, "extended", extended)

	columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
	if extended {
		// Track useful cells (cells that extended our data).
		partialMessageUsefulCellsTotal.WithLabelValues(columnIndexStr).Add(float64(len(cells.cells)))

		// TODO: we could use the heuristic here that if this data was
		// useful to us, it's likely useful to our peers and we should
		// republish eagerly.

		if col, ok := ourDataColumn.Complete(p.logger); ok {
			p.logger.Info("Completed partial column", "topic", cells.topic, "group", cells.group)
			if p.HandleColumn != nil {
				go p.HandleColumn(cells.topic, col)
			}
		} else {
			p.logger.Info("Extended partial column", "topic", cells.topic, "group", cells.group)
		}

		headerHandled := p.headerHandled[string(ourDataColumn.GroupID())]
		if headerHandled != nil {
			select {
			case <-headerHandled:
			case <-time.After(headerHandledTimeout):
				return errors.New("header not handled in time")
			}
		}

		err := p.ps.PublishPartialMessage(cells.topic, ourDataColumn, partialmessages.PublishOptions{})
		if err != nil {
			return err
		}
	}
	return nil
}

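Both handleIncomingRPC and handleCellsValidated gate republishing on the headerHandled channel, waiting at most headerHandledTimeout. The same select-with-timeout idiom in isolation (names here are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitHandled blocks until the signal channel is closed or the timeout fires.
func waitHandled(handled <-chan struct{}, timeout time.Duration) error {
	select {
	case <-handled:
		return nil
	case <-time.After(timeout):
		return errors.New("header not handled in time")
	}
}

func main() {
	ch := make(chan struct{})
	go func() {
		time.Sleep(5 * time.Millisecond)
		close(ch) // signal that the header was handled
	}()
	fmt.Println(waitHandled(ch, time.Second)) // <nil>
}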
func (p *PartialColumnBroadcaster) Stop() {
	if p.stop != nil {
		close(p.stop)
	}
}

// Publish publishes the partial column.
func (p *PartialColumnBroadcaster) Publish(topic string, c blocks.PartialDataColumn) error {
	if p.ps == nil {
		return errors.New("pubsub not initialized")
	}
	respCh := make(chan error)
	p.incomingReq <- request{
		kind:     requestKindPublish,
		response: respCh,
		publish: publish{
			topic: topic,
			c:     c,
		},
	}
	return <-respCh
}

func (p *PartialColumnBroadcaster) publish(topic string, c blocks.PartialDataColumn) error {
	topicStore, ok := p.partialMsgStore[topic]
	if !ok {
		topicStore = make(map[string]*blocks.PartialDataColumn)
		p.partialMsgStore[topic] = topicStore
	}

	var extended bool
	existing := topicStore[string(c.GroupID())]
	if existing != nil {
		// Extend the existing column with the cells being published here.
		// The existing column may already contain cells received from peers.
		// We must not overwrite it.
		for i := range c.Included.Len() {
			if c.Included.BitAt(i) {
				// Record whether any cell extended the stored column; a plain
				// assignment here would discard the result of earlier iterations.
				if existing.ExtendFromVerfifiedCell(uint64(i), c.Column[i], c.KzgProofs[i]) {
					extended = true
				}
			}
		}
		if extended {
			if col, ok := existing.Complete(p.logger); ok {
				p.logger.Info("Completed partial column", "topic", topic, "group", existing.GroupID())
				if p.HandleColumn != nil {
					go p.HandleColumn(topic, col)
				}
			}
		}
	} else {
		topicStore[string(c.GroupID())] = &c
		existing = &c
	}

	p.groupTTL[string(c.GroupID())] = TTLInSlots

	return p.ps.PublishPartialMessage(topic, existing, partialmessages.PublishOptions{})
}

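publish merges the caller's cells into any column already in the store rather than replacing it, so cells learned from peers are never lost. A toy sketch of the same merge-without-overwrite rule over plain maps (the real code walks a bitlist of included cells):

package main

import "fmt"

// merge copies cells from src into dst but never overwrites an existing
// entry, mirroring how publish extends a stored partial column.
func merge(dst, src map[int][]byte) (extended bool) {
	for i, cell := range src {
		if _, ok := dst[i]; ok {
			continue // keep the cell we already hold
		}
		dst[i] = cell
		extended = true
	}
	return extended
}

func main() {
	dst := map[int][]byte{0: {0xaa}}
	src := map[int][]byte{0: {0xbb}, 1: {0xcc}}
	fmt.Println(merge(dst, src))   // true: cell 1 was new
	fmt.Println(dst[0][0] == 0xaa) // true: cell 0 was not overwritten
}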
type SubHandler func(topic string, col blocks.VerifiedRODataColumn)

func (p *PartialColumnBroadcaster) Subscribe(t *pubsub.Topic) error {
	respCh := make(chan error)
	p.incomingReq <- request{
		kind: requestKindSubscribe,
		sub: subscribe{
			t: t,
		},
		response: respCh,
	}
	return <-respCh
}
func (p *PartialColumnBroadcaster) subscribe(t *pubsub.Topic) error {
	topic := t.String()
	if _, ok := p.topics[topic]; ok {
		return errors.New("already subscribed")
	}

	p.topics[topic] = t
	return nil
}

func (p *PartialColumnBroadcaster) Unsubscribe(topic string) error {
	respCh := make(chan error)
	p.incomingReq <- request{
		kind: requestKindUnsubscribe,
		unsub: unsubscribe{
			topic: topic,
		},
		response: respCh,
	}
	return <-respCh
}
func (p *PartialColumnBroadcaster) unsubscribe(topic string) error {
	t, ok := p.topics[topic]
	if !ok {
		return errors.New("topic not found")
	}
	delete(p.topics, topic)
	delete(p.partialMsgStore, topic)
	return t.Close()
}
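The run loop at the top of this section also ages out groups: groupTTL entries are reset to TTLInSlots on activity and expired groups are dropped from the store. A small sketch of that TTL eviction pattern (values are illustrative):

package main

import "fmt"

// evict decrements each key's remaining budget and deletes entries that
// reach zero, as the broadcaster's per-slot cleanup does for groups.
func evict(ttl map[string]int) {
	for k := range ttl {
		ttl[k]--
		if ttl[k] <= 0 {
			delete(ttl, k) // deleting during range is safe in Go
		}
	}
}

func main() {
	ttl := map[string]int{"groupA": 1, "groupB": 2}
	evict(ttl)
	fmt.Println(len(ttl)) // 1: groupA expired
}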
@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {

 	resAddress, err := p.Address(id)
 	require.NoError(t, err)
-	assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
+	assert.Equal(t, address, resAddress, "Unexpected address")

 	resDirection, err := p.Direction(id)
 	require.NoError(t, err)
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {

 	resAddress2, err := p.Address(id)
 	require.NoError(t, err)
-	assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
+	assert.Equal(t, address2, resAddress2, "Unexpected address")

 	resDirection2, err := p.Direction(id)
 	require.NoError(t, err)
@@ -170,7 +170,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
 		pubsub.WithPeerScore(peerScoringParams(s.cfg.IPColocationWhitelist)),
 		pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
 		pubsub.WithGossipSubParams(pubsubGossipParam()),
-		pubsub.WithRawTracer(&gossipTracer{host: s.host, allowedTopics: filt}),
+		pubsub.WithRawTracer(gossipTracer{host: s.host}),
 	}

 	if len(s.cfg.StaticPeers) > 0 {
@@ -181,9 +181,6 @@ func (s *Service) pubsubOptions() []pubsub.Option {
 		}
 		psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
 	}
-	if s.partialColumnBroadcaster != nil {
-		psOpts = s.partialColumnBroadcaster.AppendPubSubOpts(psOpts)
-	}

 	return psOpts
 }
@@ -1,8 +1,6 @@
 package p2p

 import (
-	"sync"
-
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -10,7 +8,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )

-var _ = pubsub.RawTracer(&gossipTracer{})
+var _ = pubsub.RawTracer(gossipTracer{})

 // Initializes the values for the pubsub rpc action.
 type action int
@@ -25,160 +23,85 @@ const (
 // and broadcasted through gossipsub.
 type gossipTracer struct {
 	host host.Host
-
-	allowedTopics pubsub.SubscriptionFilter
-
-	mu sync.Mutex
-	// map topic -> Set(peerID). Peer is in set if it supports partial messages.
-	partialMessagePeers map[string]map[peer.ID]struct{}
-	// map topic -> Set(peerID). Peer is in set if in the mesh.
-	meshPeers map[string]map[peer.ID]struct{}
 }

 // AddPeer .
-func (g *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
+func (g gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
 	// no-op
 }

 // RemovePeer .
-func (g *gossipTracer) RemovePeer(p peer.ID) {
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	for _, peers := range g.partialMessagePeers {
-		delete(peers, p)
-	}
-	for topic, peers := range g.meshPeers {
-		if _, ok := peers[p]; ok {
-			delete(peers, p)
-			g.updateMeshPeersMetric(topic)
-		}
-	}
+func (g gossipTracer) RemovePeer(p peer.ID) {
+	// no-op
 }

 // Join .
-func (g *gossipTracer) Join(topic string) {
+func (g gossipTracer) Join(topic string) {
 	pubsubTopicsActive.WithLabelValues(topic).Set(1)
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	if g.partialMessagePeers == nil {
-		g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
-	}
-	if g.partialMessagePeers[topic] == nil {
-		g.partialMessagePeers[topic] = make(map[peer.ID]struct{})
-	}
-
-	if g.meshPeers == nil {
-		g.meshPeers = make(map[string]map[peer.ID]struct{})
-	}
-	if g.meshPeers[topic] == nil {
-		g.meshPeers[topic] = make(map[peer.ID]struct{})
-	}
 }

 // Leave .
-func (g *gossipTracer) Leave(topic string) {
+func (g gossipTracer) Leave(topic string) {
 	pubsubTopicsActive.WithLabelValues(topic).Set(0)
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	delete(g.partialMessagePeers, topic)
-	delete(g.meshPeers, topic)
 }

 // Graft .
-func (g *gossipTracer) Graft(p peer.ID, topic string) {
+func (g gossipTracer) Graft(p peer.ID, topic string) {
 	pubsubTopicsGraft.WithLabelValues(topic).Inc()
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	if m, ok := g.meshPeers[topic]; ok {
-		m[p] = struct{}{}
-	}
-	g.updateMeshPeersMetric(topic)
 }

 // Prune .
-func (g *gossipTracer) Prune(p peer.ID, topic string) {
+func (g gossipTracer) Prune(p peer.ID, topic string) {
 	pubsubTopicsPrune.WithLabelValues(topic).Inc()
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	if m, ok := g.meshPeers[topic]; ok {
-		delete(m, p)
-	}
-	g.updateMeshPeersMetric(topic)
 }

 // ValidateMessage .
-func (g *gossipTracer) ValidateMessage(msg *pubsub.Message) {
+func (g gossipTracer) ValidateMessage(msg *pubsub.Message) {
 	pubsubMessageValidate.WithLabelValues(*msg.Topic).Inc()
 }

 // DeliverMessage .
-func (g *gossipTracer) DeliverMessage(msg *pubsub.Message) {
+func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
 	pubsubMessageDeliver.WithLabelValues(*msg.Topic).Inc()
 }

 // RejectMessage .
-func (g *gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
+func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
 	pubsubMessageReject.WithLabelValues(*msg.Topic, reason).Inc()
 }

 // DuplicateMessage .
-func (g *gossipTracer) DuplicateMessage(msg *pubsub.Message) {
+func (g gossipTracer) DuplicateMessage(msg *pubsub.Message) {
 	pubsubMessageDuplicate.WithLabelValues(*msg.Topic).Inc()
 }

 // UndeliverableMessage .
-func (g *gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
+func (g gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
 	pubsubMessageUndeliverable.WithLabelValues(*msg.Topic).Inc()
 }

 // ThrottlePeer .
-func (g *gossipTracer) ThrottlePeer(p peer.ID) {
+func (g gossipTracer) ThrottlePeer(p peer.ID) {
 	agent := agentFromPid(p, g.host.Peerstore())
 	pubsubPeerThrottle.WithLabelValues(agent).Inc()
 }

 // RecvRPC .
-func (g *gossipTracer) RecvRPC(rpc *pubsub.RPC) {
-	from := rpc.From()
-	g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCPubRecvSize, pubsubRPCRecv, rpc)
-
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	for _, sub := range rpc.Subscriptions {
-		topic := sub.GetTopicid()
-		if !g.allowedTopics.CanSubscribe(topic) {
-			continue
-		}
-		if g.partialMessagePeers == nil {
-			g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
-		}
-		m, ok := g.partialMessagePeers[topic]
-		if !ok {
-			m = make(map[peer.ID]struct{})
-			g.partialMessagePeers[topic] = m
-		}
-		if sub.GetSubscribe() && sub.GetRequestsPartial() {
-			m[from] = struct{}{}
-		} else {
-			delete(m, from)
-			if len(m) == 0 {
-				delete(g.partialMessagePeers, topic)
-			}
-		}
-	}
+func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
+	g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCRecv, rpc)
 }

 // SendRPC .
-func (g *gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
-	g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCPubSentSize, pubsubRPCSent, rpc)
+func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
+	g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCSent, rpc)
 }

 // DropRPC .
-func (g *gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
-	g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCPubDropSize, pubsubRPCDrop, rpc)
+func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
+	g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCDrop, rpc)
 }

-func (g *gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, pubSizeCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
+func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
 	subCtr.Add(float64(len(rpc.Subscriptions)))
 	if rpc.Control != nil {
 		ctrlCtr.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
@@ -187,41 +110,12 @@ func (g *gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, p
 		ctrlCtr.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
 		ctrlCtr.WithLabelValues("idontwant").Add(float64(len(rpc.Control.Idontwant)))
 	}
-	// For incoming messages from pubsub, we do not record metrics for them as these values
-	// could be junk.
-	if act == recv {
-		return
-	}
 	for _, msg := range rpc.Publish {
-		pubCtr.WithLabelValues(msg.GetTopic()).Inc()
-		pubSizeCtr.WithLabelValues(msg.GetTopic(), "false").Add(float64(msg.Size()))
-	}
-	if rpc.Partial != nil {
-		pubCtr.WithLabelValues(rpc.Partial.GetTopicID()).Inc()
-		pubSizeCtr.WithLabelValues(rpc.Partial.GetTopicID(), "true").Add(float64(rpc.Partial.Size()))
-	}
-}
-
-// updateMeshPeersMetric requires the caller to hold the state mutex
-func (g *gossipTracer) updateMeshPeersMetric(topic string) {
-	meshPeers, ok := g.meshPeers[topic]
-	if !ok {
-		return
-	}
-	partialPeers, ok := g.partialMessagePeers[topic]
-	if !ok {
-		return
-	}
-
-	var supportsPartial, doesNotSupportPartial float64
-	for p := range meshPeers {
-		if _, ok := partialPeers[p]; ok {
-			supportsPartial++
-		} else {
-			doesNotSupportPartial++
-		}
+		// For incoming messages from pubsub, we do not record metrics for them as these values
+		// could be junk.
+		if act == recv {
+			continue
+		}
+		pubCtr.WithLabelValues(*msg.Topic).Inc()
 	}
-
-	pubsubMeshPeers.WithLabelValues(topic, "true").Set(supportsPartial)
-	pubsubMeshPeers.WithLabelValues(topic, "false").Set(doesNotSupportPartial)
 }
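The `var _ = pubsub.RawTracer(gossipTracer{})` line kept in this diff is a compile-time interface check; switching the methods from pointer to value receivers is what lets the bare struct literal satisfy the interface. A self-contained sketch of the idiom:

package main

import "fmt"

type tracer interface {
	Join(topic string)
}

type noopTracer struct{}

// Value receiver: a plain noopTracer{} literal satisfies the interface.
func (noopTracer) Join(topic string) { fmt.Println("joined", topic) }

// Compile-time check: the build breaks if noopTracer stops implementing tracer.
var _ tracer = noopTracer{}

func main() {
	noopTracer{}.Join("beacon_block")
}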
@@ -11,7 +11,6 @@ import (

 	"github.com/OffchainLabs/prysm/v7/async"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
@@ -78,7 +77,6 @@ type Service struct {
 	privKey                  *ecdsa.PrivateKey
 	metaData                 metadata.Metadata
 	pubsub                   *pubsub.PubSub
-	partialColumnBroadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
 	joinedTopics             map[string]*pubsub.Topic
 	joinedTopicsLock         sync.RWMutex
 	subnetsLock              map[uint64]*sync.RWMutex
@@ -149,10 +147,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
 		custodyInfoSet: make(chan struct{}),
 	}

-	if cfg.PartialDataColumns {
-		s.partialColumnBroadcaster = partialdatacolumnbroadcaster.NewBroadcaster(log.Logger)
-	}
-
 	ipAddr := prysmnetwork.IPAddr()

 	opts, err := s.buildOptions(ipAddr, s.privKey)
@@ -311,10 +305,6 @@ func (s *Service) Start() {
 		logExternalDNSAddr(s.host.ID(), p2pHostDNS, p2pTCPPort)
 	}
 	go s.forkWatcher()
-
-	if s.partialColumnBroadcaster != nil {
-		go s.partialColumnBroadcaster.Start()
-	}
 }

 // Stop the p2p service and terminate all peer connections.
@@ -324,10 +314,6 @@ func (s *Service) Stop() error {
 	if s.dv5Listener != nil {
 		s.dv5Listener.Close()
 	}
-
-	if s.partialColumnBroadcaster != nil {
-		s.partialColumnBroadcaster.Stop()
-	}
 	return nil
 }

@@ -364,10 +350,6 @@ func (s *Service) PubSub() *pubsub.PubSub {
 	return s.pubsub
 }

-func (s *Service) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
-	return s.partialColumnBroadcaster
-}
-
 // Host returns the currently running libp2p
 // host of the service.
 func (s *Service) Host() host.Host {
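The removed wiring followed a common optional-component shape: construct the component only behind a config flag, keep a nil pointer otherwise, and nil-guard Start/Stop. A minimal sketch of that shape (all names illustrative):

package main

import "fmt"

type feature struct{}

func (f *feature) Start() { fmt.Println("feature started") }
func (f *feature) Stop()  { fmt.Println("feature stopped") }

type service struct {
	feat *feature // nil when the feature is disabled
}

func newService(enabled bool) *service {
	s := &service{}
	if enabled {
		s.feat = &feature{} // construct only behind the flag
	}
	return s
}

func main() {
	s := newService(true)
	if s.feat != nil {
		s.feat.Start()
	}
	if s.feat != nil {
		s.feat.Stop()
	}
}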
@@ -21,7 +21,6 @@ go_library(
     deps = [
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
         "//beacon-chain/p2p/peers/scorers:go_default_library",
         "//config/fieldparams:go_default_library",
@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -109,10 +108,6 @@ func (*FakeP2P) PubSub() *pubsub.PubSub {
 	return nil
 }

-func (*FakeP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
-	return nil
-}
-
 // MetadataSeq -- fake.
 func (*FakeP2P) MetadataSeq() uint64 {
 	return 0
@@ -174,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
 }

 // BroadcastDataColumnSidecar -- fake.
-func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
+func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
 	return nil
 }

@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
 }

 // BroadcastDataColumnSidecar broadcasts a data column for mock.
-func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
+func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
 	m.BroadcastCalled.Store(true)
 	return nil
 }
@@ -13,7 +13,6 @@ import (

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -139,6 +138,9 @@ func connect(a, b host.Host) error {
 func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
 	h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
 	require.NoError(p.t, err)
+	p.t.Cleanup(func() {
+		require.NoError(p.t, h.Close())
+	})
 	if err := connect(h, p.BHost); err != nil {
 		p.t.Fatalf("Failed to connect two peers for RPC: %v", err)
 	}
@@ -170,6 +172,9 @@ func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
 func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
 	h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
 	require.NoError(p.t, err)
+	p.t.Cleanup(func() {
+		require.NoError(p.t, h.Close())
+	})
 	ps, err := pubsub.NewFloodSub(context.Background(), h,
 		pubsub.WithMessageSigning(false),
 		pubsub.WithStrictSignatureVerification(false),
@@ -243,7 +248,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
 }

 // BroadcastDataColumnSidecar broadcasts a data column for mock.
-func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
+func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
 	p.BroadcastCalled.Store(true)
 	return nil
 }
@@ -309,10 +314,6 @@ func (p *TestP2P) PubSub() *pubsub.PubSub {
 	return p.pubsub
 }

-func (p *TestP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
-	return nil
-}
-
 // Disconnect from a peer.
 func (p *TestP2P) Disconnect(pid peer.ID) error {
 	return p.BHost.Network().ClosePeer(pid)
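The added t.Cleanup calls tie the temporary libp2p host's lifetime to the test: registered functions run (last-in, first-out) when the test finishes, which avoids leaking hosts across test cases. A small self-contained example of the idiom:

package example

import "testing"

// newTempResource registers teardown with t.Cleanup, so every caller gets
// automatic release when its test finishes, in last-in, first-out order.
func newTempResource(t *testing.T) *int {
	t.Helper()
	r := new(int)
	t.Cleanup(func() { *r = 0 }) // stand-in for closing a libp2p host
	return r
}

func TestResource(t *testing.T) {
	r := newTempResource(t)
	*r = 42 // use the resource; cleanup runs after this test returns
}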
@@ -405,6 +405,7 @@ func (s *Service) nodeEndpoints() []endpoint {
 		MetadataProvider:          s.cfg.MetadataProvider,
 		HeadFetcher:               s.cfg.HeadFetcher,
 		ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
+		ExecutionEngineCaller:     s.cfg.ExecutionEngineCaller,
 	}

 	const namespace = "node"
@@ -469,6 +470,16 @@ func (s *Service) nodeEndpoints() []endpoint {
 			handler: server.GetVersion,
 			methods: []string{http.MethodGet},
 		},
+		{
+			template: "/eth/v2/node/version",
+			name:     namespace + ".GetVersionV2",
+			middleware: []middleware.Middleware{
+				middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
+				middleware.AcceptEncodingHeaderHandler(),
+			},
+			handler: server.GetVersionV2,
+			methods: []string{http.MethodGet},
+		},
 		{
 			template: "/eth/v1/node/health",
 			name:     namespace + ".GetHealth",
@@ -86,6 +86,7 @@ func Test_endpoints(t *testing.T) {
 		"/eth/v1/node/peers/{peer_id}": {http.MethodGet},
 		"/eth/v1/node/peer_count":      {http.MethodGet},
 		"/eth/v1/node/version":         {http.MethodGet},
+		"/eth/v2/node/version":         {http.MethodGet},
 		"/eth/v1/node/syncing":         {http.MethodGet},
 		"/eth/v1/node/health":          {http.MethodGet},
 	}
@@ -5,6 +5,7 @@ go_library(
     srcs = [
         "handlers.go",
         "handlers_peers.go",
+        "log.go",
        "server.go",
     ],
     importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/node",
@@ -30,6 +31,7 @@ go_library(
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
         "@org_golang_google_grpc//:go_default_library",
     ],
 )
@@ -44,6 +46,7 @@ go_test(
     deps = [
         "//api/server/structs:go_default_library",
         "//beacon-chain/blockchain/testing:go_default_library",
+        "//beacon-chain/execution/testing:go_default_library",
         "//beacon-chain/p2p:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
         "//beacon-chain/p2p/testing:go_default_library",
@@ -116,6 +116,35 @@ func (*Server) GetVersion(w http.ResponseWriter, r *http.Request) {
 	httputil.WriteJson(w, resp)
 }

+// GetVersionV2 retrieves structured information about the version of the beacon node and its
+// attached execution client(s), in the same format as used on the Engine API.
+func (s *Server) GetVersionV2(w http.ResponseWriter, r *http.Request) {
+	ctx, span := trace.StartSpan(r.Context(), "node.GetVersionV2")
+	defer span.End()
+
+	elDataList, err := s.ExecutionEngineCaller.GetClientVersionV1(ctx)
+	if err != nil {
+		log.WithError(err).WithField("endpoint", "GetVersionV2").Debug("Could not get execution client version")
+	}
+
+	commit := version.GitCommit()
+	if len(commit) >= 8 {
+		commit = commit[:8]
+	}
+	resp := &structs.GetVersionV2Response{
+		Data: &structs.VersionV2{
+			BeaconNode: &structs.ClientVersionV1{
+				Code:    "PM",
+				Name:    "Prysm",
+				Version: version.SemanticVersion(),
+				Commit:  commit,
+			},
+			ExecutionClient: elDataList,
+		},
+	}
+	httputil.WriteJson(w, resp)
+}
+
 // GetHealth returns node health status in http status codes. Useful for load balancers.
 func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
 	ctx, span := trace.StartSpan(r.Context(), "node.GetHealth")
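The handler above can be exercised like any other Beacon API route. Assuming a local beacon node serving the HTTP API on Prysm's default port (3500; adjust for your setup):

curl -H "Accept: application/json" http://localhost:3500/eth/v2/node/version

The response is a GetVersionV2Response: one beacon-node entry (Code "PM", Name "Prysm", the semantic version, and a commit hash truncated to 8 characters) plus one ClientVersionV1 entry per attached execution client, mirroring the Engine API's engine_getClientVersionV1 result. The exact JSON field names depend on the structs' tags, which this diff does not show.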
@@ -12,6 +12,7 @@ import (
 	"github.com/OffchainLabs/go-bitfield"
 	"github.com/OffchainLabs/prysm/v7/api/server/structs"
 	mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
+	mockengine "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
 	mockp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
@@ -90,6 +91,41 @@ func TestGetVersion(t *testing.T) {
 	assert.StringContains(t, arch, resp.Data.Version)
 }

+func TestGetVersionV2(t *testing.T) {
+	request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/node/version", nil)
+	writer := httptest.NewRecorder()
+	writer.Body = &bytes.Buffer{}
+
+	s := &Server{
+		ExecutionEngineCaller: &mockengine.EngineClient{
+			ClientVersion: []*structs.ClientVersionV1{{
+				Code:    "EL",
+				Name:    "ExecutionClient",
+				Version: "v1.0.0",
+				Commit:  "abcdef12",
+			}},
+		},
+	}
+	s.GetVersionV2(writer, request)
+	require.Equal(t, http.StatusOK, writer.Code)
+
+	resp := &structs.GetVersionV2Response{}
+	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
+	require.NotNil(t, resp)
+	require.NotNil(t, resp.Data)
+	require.NotNil(t, resp.Data.BeaconNode)
+	require.NotNil(t, resp.Data.ExecutionClient)
+	e := resp.Data.ExecutionClient[0]
+	require.Equal(t, "EL", e.Code)
+	require.Equal(t, "ExecutionClient", e.Name)
+	require.Equal(t, "v1.0.0", e.Version)
+	require.Equal(t, "abcdef12", e.Commit)
+	require.Equal(t, "PM", resp.Data.BeaconNode.Code)
+	require.Equal(t, "Prysm", resp.Data.BeaconNode.Name)
+	require.Equal(t, version.SemanticVersion(), resp.Data.BeaconNode.Version)
+	require.Equal(t, true, len(resp.Data.BeaconNode.Commit) <= 8)
+}
+
 func TestGetHealth(t *testing.T) {
 	checker := &syncmock.Sync{}
 	optimisticFetcher := &mock.ChainService{Optimistic: false}
@@ -1,9 +1,9 @@
 // Code generated by hack/gen-logs.sh; DO NOT EDIT.
 // This file is created and regenerated automatically. Anything added here might get removed.
-package logrusadapter
+package node

 import "github.com/sirupsen/logrus"

 // The prefix for logs from this package will be the text after the last slash in the package path.
 // If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
-var log = logrus.WithField("package", "internal/logrusadapter")
+var log = logrus.WithField("package", "beacon-chain/rpc/eth/node")
@@ -26,4 +26,5 @@ type Server struct {
 	GenesisTimeFetcher        blockchain.TimeFetcher
 	HeadFetcher               blockchain.HeadFetcher
 	ExecutionChainInfoFetcher execution.ChainInfoFetcher
+	ExecutionEngineCaller     execution.EngineCaller
 }
@@ -7,7 +7,6 @@ import (
 	"sync"
 	"time"

-	"github.com/OffchainLabs/go-bitfield"
 	builderapi "github.com/OffchainLabs/prysm/v7/api/client/builder"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
@@ -309,7 +308,6 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
 	}

 	rob, err := blocks.NewROBlockWithRoot(block, root)
-	var partialColumns []blocks.PartialDataColumn
 	if block.IsBlinded() {
 		block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
 		if errors.Is(err, builderapi.ErrBadGateway) {
@@ -317,7 +315,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
 			return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
 		}
 	} else if block.Version() >= version.Deneb {
-		blobSidecars, dataColumnSidecars, partialColumns, err = vs.handleUnblindedBlock(rob, req)
+		blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
 	}
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -337,7 +335,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign

 	wg.Wait()

-	if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars, partialColumns); err != nil {
+	if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
 	}
 	if err := <-errChan; err != nil {
@@ -354,10 +352,9 @@ func (vs *Server) broadcastAndReceiveSidecars(
 	root [fieldparams.RootLength]byte,
 	blobSidecars []*ethpb.BlobSidecar,
 	dataColumnSidecars []blocks.RODataColumn,
-	partialColumns []blocks.PartialDataColumn,
 ) error {
 	if block.Version() >= version.Fulu {
-		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, partialColumns); err != nil {
+		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
 			return errors.Wrap(err, "broadcast and receive data columns")
 		}
 		return nil
@@ -406,41 +403,34 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
 func (vs *Server) handleUnblindedBlock(
 	block blocks.ROBlock,
 	req *ethpb.GenericSignedBeaconBlock,
-) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, []blocks.PartialDataColumn, error) {
+) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
 	rawBlobs, proofs, err := blobsAndProofs(req)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}

 	if block.Version() >= version.Fulu {
 		// Compute cells and proofs from the blobs and cell proofs.
 		cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
 		if err != nil {
-			return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
+			return nil, nil, errors.Wrap(err, "compute cells and proofs")
 		}

 		// Construct data column sidecars from the signed block and cells and proofs.
 		roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
 		if err != nil {
-			return nil, nil, nil, errors.Wrap(err, "data column sidcars")
+			return nil, nil, errors.Wrap(err, "data column sidcars")
 		}

-		included := bitfield.NewBitlist(uint64(len(cellsPerBlob)))
-		included = included.Not() // all bits set to 1
-		partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
-		if err != nil {
-			return nil, nil, nil, errors.Wrap(err, "data column sidcars")
-		}
-
-		return nil, roDataColumnSidecars, partialColumns, nil
+		return nil, roDataColumnSidecars, nil
 	}

 	blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
 	if err != nil {
-		return nil, nil, nil, errors.Wrap(err, "build blob sidecars")
+		return nil, nil, errors.Wrap(err, "build blob sidecars")
 	}

-	return blobSidecars, nil, nil, nil
+	return blobSidecars, nil, nil
 }

 // broadcastReceiveBlock broadcasts a block and handles its reception.
@@ -507,7 +497,7 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
 }

 // broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
-func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn, partialColumns []blocks.PartialDataColumn) error {
+func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
 	// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
 	verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
 	for _, sidecar := range roSidecars {
@@ -516,7 +506,7 @@ func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars
 	}

 	// Broadcast sidecars (non blocking).
-	if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars, partialColumns); err != nil {
+	if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
 		return errors.Wrap(err, "broadcast data column sidecars")
 	}
@@ -46,14 +46,20 @@ func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fiel
 }

 // IsActiveBuilder returns true if the builder placement is finalized and it has not initiated exit.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
 //
-//	builder = state.builders[builder_index]
-//	return (
-//	    builder.deposit_epoch < state.finalized_checkpoint.epoch
-//	    and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
-//	)
+// <spec fn="is_active_builder" fork="gloas" hash="1a599fb2">
+// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
+//     """
+//     Check if the builder at ``builder_index`` is active for the given ``state``.
+//     """
+//     builder = state.builders[builder_index]
+//     return (
+//         # Placement in builder list is finalized
+//         builder.deposit_epoch < state.finalized_checkpoint.epoch
+//         # Has not initiated exit
+//         and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
+//     )
+// </spec>
 func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (bool, error) {
 	if b.version < version.Gloas {
 		return false, errNotSupported("IsActiveBuilder", b.version)
@@ -72,15 +78,18 @@ func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (boo
 }

 // CanBuilderCoverBid returns true if the builder has enough balance to cover the given bid amount.
-// Spec v1.7.0-alpha.0 (pseudocode):
-// def can_builder_cover_bid(state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei) -> bool:
 //
-//	builder_balance = state.builders[builder_index].balance
-//	pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
-//	min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
-//	if builder_balance < min_balance:
-//	    return False
-//	return builder_balance - min_balance >= bid_amount
+// <spec fn="can_builder_cover_bid" fork="gloas" hash="9e3f2d7c">
+// def can_builder_cover_bid(
+//     state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei
+// ) -> bool:
+//     builder_balance = state.builders[builder_index].balance
+//     pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
+//     min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
+//     if builder_balance < min_balance:
+//         return False
+//     return builder_balance - min_balance >= bid_amount
+// </spec>
 func (b *BeaconState) CanBuilderCoverBid(builderIndex primitives.BuilderIndex, bidAmount primitives.Gwei) (bool, error) {
 	if b.version < version.Gloas {
 		return false, errNotSupported("CanBuilderCoverBid", b.version)
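For concreteness, here is a toy Go rendering of the two spec predicates quoted above. FAR_FUTURE_EPOCH and MIN_DEPOSIT_AMOUNT use their spec values (2**64-1 and 1 ETH in Gwei); the builder struct is a simplified stand-in, not the package's actual type.

package main

import "fmt"

const (
	farFutureEpoch   = ^uint64(0)    // FAR_FUTURE_EPOCH
	minDepositAmount = 1_000_000_000 // MIN_DEPOSIT_AMOUNT, in Gwei
)

type builder struct {
	depositEpoch      uint64
	withdrawableEpoch uint64
	balance           uint64 // Gwei
}

// isActiveBuilder: the builder's placement is finalized and no exit started.
func isActiveBuilder(b builder, finalizedEpoch uint64) bool {
	return b.depositEpoch < finalizedEpoch && b.withdrawableEpoch == farFutureEpoch
}

// canCoverBid: after paying the bid, the builder must still hold
// MIN_DEPOSIT_AMOUNT plus any pending withdrawals.
func canCoverBid(b builder, pendingWithdrawals, bid uint64) bool {
	minBalance := minDepositAmount + pendingWithdrawals
	if b.balance < minBalance {
		return false
	}
	return b.balance-minBalance >= bid
}

func main() {
	b := builder{depositEpoch: 10, withdrawableEpoch: farFutureEpoch, balance: 33_000_000_000}
	fmt.Println(isActiveBuilder(b, 12))                        // true
	fmt.Println(canCoverBid(b, 0, 31_000_000_000))             // true: 33-1 >= 31 ETH
	fmt.Println(canCoverBid(b, 2_000_000_000, 31_000_000_000)) // false: 33-3 < 31 ETH
}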
@@ -82,20 +82,20 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
 	parentBlockRoot := h.ParentBlockRoot()
 	blockHash := h.BlockHash()
 	randao := h.PrevRandao()
-	blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
+	blobKzgCommitments := h.BlobKzgCommitments()
 	feeRecipient := h.FeeRecipient()
 	b.latestExecutionPayloadBid = &ethpb.ExecutionPayloadBid{
 		ParentBlockHash:        parentBlockHash[:],
 		ParentBlockRoot:        parentBlockRoot[:],
 		BlockHash:              blockHash[:],
 		PrevRandao:             randao[:],
 		GasLimit:               h.GasLimit(),
 		BuilderIndex:           h.BuilderIndex(),
 		Slot:                   h.Slot(),
 		Value:                  h.Value(),
 		ExecutionPayment:       h.ExecutionPayment(),
-		BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
+		BlobKzgCommitments:     blobKzgCommitments,
 		FeeRecipient:           feeRecipient[:],
 	}
 	b.markFieldAsDirty(types.LatestExecutionPayloadBid)
@@ -14,17 +14,17 @@ import (
 )

 type testExecutionPayloadBid struct {
 	parentBlockHash  [32]byte
 	parentBlockRoot  [32]byte
 	blockHash        [32]byte
 	prevRandao       [32]byte
-	blobKzgCommitmentsRoot [32]byte
+	blobKzgCommitments [][]byte
 	feeRecipient     [20]byte
 	gasLimit         uint64
 	builderIndex     primitives.BuilderIndex
 	slot             primitives.Slot
 	value            primitives.Gwei
 	executionPayment primitives.Gwei
 }

 func (t testExecutionPayloadBid) ParentBlockHash() [32]byte { return t.parentBlockHash }
@@ -40,9 +40,12 @@ func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
 func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
 	return t.executionPayment
 }
-func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
+func (t testExecutionPayloadBid) BlobKzgCommitments() [][]byte { return t.blobKzgCommitments }
+func (t testExecutionPayloadBid) BlobKzgCommitmentCount() uint64 {
+	return uint64(len(t.blobKzgCommitments))
+}
 func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
 func (t testExecutionPayloadBid) IsNil() bool { return false }

 func TestSetExecutionPayloadBid(t *testing.T) {
 	t.Run("previous fork returns expected error", func(t *testing.T) {
@@ -57,7 +60,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
 		parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
 		blockHash       = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
 		prevRandao      = [32]byte(bytes.Repeat([]byte{0x11}, 32))
-		blobRoot        = [32]byte(bytes.Repeat([]byte{0x22}, 32))
+		blobCommitments = [][]byte{bytes.Repeat([]byte{0x22}, 48)}
 		feeRecipient    [20]byte
 	)
 	copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
@@ -66,17 +69,17 @@ func TestSetExecutionPayloadBid(t *testing.T) {
 		dirtyFields: make(map[types.FieldIndex]bool),
 	}
 	bid := testExecutionPayloadBid{
 		parentBlockHash:  parentBlockHash,
 		parentBlockRoot:  parentBlockRoot,
 		blockHash:        blockHash,
 		prevRandao:       prevRandao,
-		blobKzgCommitmentsRoot: blobRoot,
+		blobKzgCommitments: blobCommitments,
 		feeRecipient:     feeRecipient,
 		gasLimit:         123,
 		builderIndex:     7,
 		slot:             9,
 		value:            11,
 		executionPayment: 22,
 	}

 	require.NoError(t, st.SetExecutionPayloadBid(bid))
@@ -86,7 +89,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
 	require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
 	require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
 	require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
-	require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
+	require.DeepEqual(t, blobCommitments, st.latestExecutionPayloadBid.BlobKzgCommitments)
 	require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
 	require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
 	require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)
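The switch from a single 32-byte commitments root to a [][]byte list means each element is now a full KZG commitment, which the test's bytes.Repeat(..., 48) reflects: a KZG commitment is a 48-byte compressed BLS12-381 G1 point. A hypothetical guard around the new field (illustrative only, not a Prysm helper) could look like:

package main

import "fmt"

const kzgCommitmentLength = 48 // size of a compressed G1 point

// validateCommitments rejects any element that is not commitment-sized.
func validateCommitments(commitments [][]byte) error {
	for i, c := range commitments {
		if len(c) != kzgCommitmentLength {
			return fmt.Errorf("commitment %d: got %d bytes, want %d", i, len(c), kzgCommitmentLength)
		}
	}
	return nil
}

func main() {
	good := [][]byte{make([]byte, kzgCommitmentLength)}
	fmt.Println(validateCommitments(good)) // <nil>
}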
@@ -58,7 +58,6 @@ go_library(
         "validate_bls_to_execution_change.go",
         "validate_data_column.go",
         "validate_light_client.go",
-        "validate_partial_header.go",
         "validate_proposer_slashing.go",
         "validate_sync_committee_message.go",
         "validate_sync_contribution_proof.go",
@@ -99,7 +98,6 @@ go_library(
         "//beacon-chain/operations/voluntaryexits:go_default_library",
         "//beacon-chain/p2p:go_default_library",
         "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
         "//beacon-chain/p2p/types:go_default_library",
         "//beacon-chain/slasher/types:go_default_library",
@@ -2,10 +2,8 @@ package sync

 import (
 	"context"
-	"iter"
 	"time"

-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/crypto/bls"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -21,16 +19,9 @@ type signatureVerifier struct {
 	resChan chan error
 }

-type errorWithSegment struct {
-	err error
-	// segment is only available if the batched verification failed
-	segment peerdas.CellProofBundleSegment
-}
-
 type kzgVerifier struct {
-	sizeHint   int
-	cellProofs iter.Seq[blocks.CellProofBundle]
-	resChan    chan errorWithSegment
+	dataColumns []blocks.RODataColumn
+	resChan     chan error
 }

 // A routine that runs in the background to perform batch
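Both verifier structs follow the same request/response pattern: a caller enqueues a unit of work together with a per-request result channel, and a background routine answers on that channel. A stripped-down sketch of the pattern, with stand-in types rather than Prysm's:

package main

import "fmt"

// verifyRequest pairs one unit of work with its own result channel,
// mirroring the resChan field in the structs above.
type verifyRequest struct {
	payload []byte
	resChan chan error
}

// runVerifier drains the queue in the background and reports per-request results.
func runVerifier(reqs <-chan verifyRequest, verify func([]byte) error) {
	for req := range reqs {
		req.resChan <- verify(req.payload)
	}
}

func main() {
	reqs := make(chan verifyRequest)
	go runVerifier(reqs, func(b []byte) error { return nil })

	res := make(chan error, 1)
	reqs <- verifyRequest{payload: []byte("data"), resChan: res}
	fmt.Println(<-res) // <nil>
}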
@@ -256,16 +256,6 @@ var (
 			Help: "Count the number of data column sidecars obtained via the execution layer.",
 		},
 	)
-
-	usefulFullColumnsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "beacon_useful_full_columns_received_total",
-		Help: "Number of useful full columns (any cell being useful) received",
-	}, []string{"column_index"})
-
-	partialMessageColumnCompletionsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "beacon_partial_message_column_completions_total",
-		Help: "How often the partial message first completed the column",
-	}, []string{"column_index"})
 )

 func (s *Service) updateMetrics() {
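For context, the removed counters used the standard promauto pattern for labeled Prometheus counters. An illustrative equivalent (metric name and package are hypothetical, not Prysm's):

package metricsexample

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleColumnsTotal is registered once at package init via promauto.
var exampleColumnsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "example_columns_total",
	Help: "Illustrative counter labeled by column index.",
}, []string{"column_index"})

// recordColumn increments the counter series for one column index.
func recordColumn(index uint64) {
	exampleColumnsTotal.WithLabelValues(strconv.FormatUint(index, 10)).Inc()
}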
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"reflect"
 	"runtime/debug"
-	"slices"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -16,13 +14,11 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
 	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v7/config/features"
 	"github.com/OffchainLabs/prysm/v7/config/params"
-	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -65,12 +61,6 @@ type subscribeParameters struct {
 	// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
 	// but for which no subscriptions are needed.
 	getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
-
-	partial *partialSubscribeParameters
-}
-
-type partialSubscribeParameters struct {
-	broadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
 }

 // shortTopic is a less verbose version of topic strings used for logging.
@@ -330,55 +320,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
 	// New gossip topic in Fulu.
 	if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
 		s.spawn(func() {
-			var ps *partialSubscribeParameters
-			broadcaster := s.cfg.p2p.PartialColumnBroadcaster()
-			if broadcaster != nil {
-				broadcaster.ValidateHeader = func(header *ethpb.PartialDataColumnHeader) (bool, error) {
-					return s.validatePartialDataColumnHeader(context.TODO(), header)
-				}
-				broadcaster.ValidateColumn = func(cellsToVerify []blocks.CellProofBundle) error {
-					return peerdas.VerifyDataColumnsCellsKZGProofs(len(cellsToVerify), slices.Values(cellsToVerify))
-				}
-				broadcaster.HandleColumn = func(topic string, col blocks.VerifiedRODataColumn) {
-					ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
-					defer cancel()
-
-					slot := col.SignedBlockHeader.Header.Slot
-					proposerIndex := col.SignedBlockHeader.Header.ProposerIndex
-					if !s.hasSeenDataColumnIndex(slot, proposerIndex, col.Index) {
-						s.setSeenDataColumnIndex(slot, proposerIndex, col.Index)
-						// This column was completed from a partial message.
-						partialMessageColumnCompletionsTotal.WithLabelValues(strconv.FormatUint(col.Index, 10)).Inc()
-					}
-					err := s.verifiedRODataColumnSubscriber(ctx, col)
-					if err != nil {
-						log.WithError(err).Error("Failed to handle verified RO data column subscriber")
-					}
-				}
-				broadcaster.HandleHeader = func(header *ethpb.PartialDataColumnHeader) chan bool {
-					ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
-					defer cancel()
-					source := peerdas.PopulateFromPartialHeader(header)
-					log.WithField("slot", source.Slot()).Info("Received data column header")
-
-					doneCh := make(chan bool, 1)
-
-					// We spin up a goroutine to process the partial column header received via gossipsub, because handling the
-					// header involves going to the EL, retrieving blobs, and then publishing the partial columns constructed from those blobs.
-					// Partial publishing needs access to the broadcaster event loop, so doing this without a goroutine could deadlock.
-					go func() {
-						err := s.processDataColumnSidecarsFromExecution(ctx, source)
-						if err != nil {
-							log.WithError(err).Error("Failed to process partial data column header")
-						}
-						close(doneCh)
-					}()
-					return doneCh
-				}
-				ps = &partialSubscribeParameters{
-					broadcaster: broadcaster,
-				}
-			}
 			s.subscribeWithParameters(subscribeParameters{
 				topicFormat: p2p.DataColumnSubnetTopicFormat,
 				validate:    s.validateDataColumn,
@@ -386,7 +327,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
 				nse:                      nse,
 				getSubnetsToJoin:         s.dataColumnSubnetIndices,
 				getSubnetsRequiringPeers: s.allDataColumnSubnets,
-				partial:                  ps,
 			})
 		})
 	}
@@ -425,10 +365,11 @@ func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandle
 		// Impossible condition as it would mean topic does not exist.
 		panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
 	}
-	s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest)+s.cfg.p2p.Encoding().ProtocolSuffix(), validator, handle)
+	s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
 }

 func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
+	topic += s.cfg.p2p.Encoding().ProtocolSuffix()
 	log := log.WithField("topic", topic)

 	// Do not resubscribe already seen subscriptions.
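This change moves appending of the encoding's protocol suffix (for example /ssz_snappy for SSZ-snappy) into subscribeWithBase itself, so callers pass suffix-free topic names and the suffix is applied in exactly one place. A tiny sketch of the single-place suffixing, with an illustrative suffix constant rather than Prysm's encoder call:

package main

import "fmt"

// fullTopic appends the protocol suffix once, at the subscription boundary.
func fullTopic(base string) string {
	const protocolSuffix = "/ssz_snappy" // assumption: the SSZ-snappy encoding suffix
	return base + protocolSuffix
}

func main() {
	fmt.Println(fullTopic("/eth2/abcd1234/data_column_sidecar_3"))
}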
@@ -591,11 +532,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
 func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
 	for _, subnet := range t.unwanted(wantedSubnets) {
 		t.cancelSubscription(subnet)
-		topic := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
-		if t.partial != nil {
-			_ = t.partial.broadcaster.Unsubscribe(topic)
-		}
-		s.unSubscribeFromTopic(topic)
+		s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
 	}
 }
@@ -642,34 +579,9 @@ func (s *Service) trySubscribeSubnets(t *subnetTracker) {
 	subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
 	s.pruneNotWanted(t, subnetsToJoin)
 	for _, subnet := range t.missing(subnetsToJoin) {
-		topicStr := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
-		topicOpts := make([]pubsub.TopicOpt, 0, 2)
-		requestPartial := t.partial != nil
-
-		if requestPartial {
-			// TODO: do we want the ability to support partial messages without requesting them?
-			topicOpts = append(topicOpts, pubsub.RequestPartialMessages())
-		}
-
-		topic, err := s.cfg.p2p.JoinTopic(topicStr, topicOpts...)
-		if err != nil {
-			log.WithError(err).Error("Failed to join topic")
-			return
-		}
-
-		if requestPartial {
-			log.Info("Subscribing to partial columns on", topicStr)
-			err = t.partial.broadcaster.Subscribe(topic)
-			if err != nil {
-				log.WithError(err).Error("Failed to subscribe to partial column")
-			}
-		}
-
-		// We still need to subscribe to the full columns as well as partial in
-		// case our peers don't support partial messages.
-		t.track(subnet, s.subscribeWithBase(topicStr, t.validate, t.handle))
+		// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
+		topic := t.fullTopic(subnet, "")
+		t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
 	}
 }
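trySubscribeSubnets is a reconcile loop: compute the wanted subnet set, prune subscriptions that are no longer wanted, then join whatever is missing. The shape of that prune-then-join pattern, sketched with stand-in callbacks rather than Prysm's subnetTracker:

package main

import "fmt"

// reconcile leaves subnets that are no longer wanted and joins the missing ones.
func reconcile(current, wanted map[uint64]bool, join, leave func(uint64)) {
	for subnet := range current {
		if !wanted[subnet] {
			leave(subnet)
		}
	}
	for subnet := range wanted {
		if !current[subnet] {
			join(subnet)
		}
	}
}

func main() {
	current := map[uint64]bool{1: true, 2: true}
	wanted := map[uint64]bool{2: true, 3: true}
	reconcile(current, wanted,
		func(s uint64) { fmt.Println("join", s) },  // joins 3
		func(s uint64) { fmt.Println("leave", s) }) // leaves 1
}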
@@ -11,7 +11,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
 	"github.com/OffchainLabs/prysm/v7/config/features"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
@@ -202,16 +201,6 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
 		return nil, errors.Wrap(err, "column indices to sample")
 	}

-	// TODO: the deadline here was removed in https://github.com/OffchainLabs/prysm/pull/16155/files
-	// make sure that reintroducing it does not cause issues.
-	secondsPerHalfSlot := time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second
-	ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
-	defer cancel()
-
-	digest, err := s.currentForkDigest()
-	if err != nil {
-		return nil, err
-	}
 	log := log.WithFields(logrus.Fields{
 		"root": fmt.Sprintf("%#x", source.Root()),
 		"slot": source.Slot(),
@@ -242,30 +231,11 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
 	}

 	// Try to reconstruct data column constructedSidecars from the execution client.
-	constructedSidecars, partialColumns, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
+	constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
 	if err != nil {
 		return nil, errors.Wrap(err, "reconstruct data column sidecars")
 	}

-	partialBroadcaster := s.cfg.p2p.PartialColumnBroadcaster()
-	if partialBroadcaster != nil {
-		log.WithField("len(partialColumns)", len(partialColumns)).Debug("Publishing partial columns")
-		for i := range uint64(len(partialColumns)) {
-			if !columnIndicesToSample[i] {
-				continue
-			}
-			subnet := peerdas.ComputeSubnetForDataColumnSidecar(i)
-			topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
-			// Publish the partial column. This is idempotent if we republish the same data twice.
-			// Note, the "partial column" may indeed be complete. We still
-			// should publish to help our peers.
-			err = partialBroadcaster.Publish(topic, partialColumns[i])
-			if err != nil {
-				log.WithError(err).Warn("Failed to publish partial column")
-			}
-		}
-	}
-
 	// No sidecars are retrieved from the EL, retry later
 	constructedCount := uint64(len(constructedSidecars))
@@ -337,7 +307,7 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
 	}

 	// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
-	if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars, nil); err != nil {
+	if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
 		return nil, errors.Wrap(err, "broadcast data column sidecars")
 	}
@@ -3,7 +3,6 @@ package sync
 import (
 	"context"
 	"fmt"
-	"strconv"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
 	opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
@@ -25,13 +24,6 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
 		return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
 	}

-	// Track useful full columns received via gossip (not previously seen)
-	slot := sidecar.SignedBlockHeader.Header.Slot
-	proposerIndex := sidecar.SignedBlockHeader.Header.ProposerIndex
-	if !s.hasSeenDataColumnIndex(slot, proposerIndex, sidecar.Index) {
-		usefulFullColumnsReceivedTotal.WithLabelValues(strconv.FormatUint(sidecar.Index, 10)).Inc()
-	}
-
 	if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
 		return wrapDataColumnError(sidecar, "receive data column sidecar", err)
 	}
@@ -65,38 +57,6 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
 	return nil
 }

-func (s *Service) verifiedRODataColumnSubscriber(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
-	log.WithField("slot", sidecar.Slot()).WithField("column", sidecar.Index).Info("Received data column sidecar")
-
-	if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
-		return errors.Wrap(err, "receive data column sidecar")
-	}
-
-	var wg errgroup.Group
-	wg.Go(func() error {
-		if err := s.processDataColumnSidecarsFromReconstruction(ctx, sidecar); err != nil {
-			return errors.Wrap(err, "process data column sidecars from reconstruction")
-		}
-		return nil
-	})
-
-	wg.Go(func() error {
-		// Broadcast our complete column for peers that don't use partial messages
-		if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{sidecar}, nil); err != nil {
-			return errors.Wrap(err, "process data column sidecars from execution")
-		}
-		return nil
-	})
-
-	if err := wg.Wait(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
 // receiveDataColumnSidecar receives a single data column sidecar: marks it as seen and saves it to the chain.
 // Do not loop over this function to receive multiple sidecars, use receiveDataColumnSidecars instead.
 func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
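The removed subscriber fanned out two independent tasks (reconstruction and re-broadcast) with golang.org/x/sync/errgroup, which runs goroutines concurrently and returns the first non-nil error from Wait. The general pattern, detached from Prysm's types:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	// Each Go call runs concurrently; Wait blocks for all and returns the first error.
	g.Go(func() error { return nil })                          // e.g. reconstruction
	g.Go(func() error { return errors.New("broadcast failed") }) // e.g. re-broadcast
	if err := g.Wait(); err != nil {
		fmt.Println(err) // broadcast failed
	}
}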
@@ -71,7 +71,6 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	roDataColumns := []blocks.RODataColumn{roDataColumn}

 	// Create the verifier.
-	// Question(marco): Do we want the multiple columns verifier? Is batching used only for kzg proofs?
 	verifier := s.newColumnsVerifier(roDataColumns, verification.GossipDataColumnSidecarRequirements)

 	// Start the verification process.
@@ -1,143 +0,0 @@
-package sync
-
-import (
-	"context"
-
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
-	"github.com/OffchainLabs/prysm/v7/config/params"
-	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
-	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
-	"github.com/OffchainLabs/prysm/v7/time/slots"
-	"github.com/pkg/errors"
-)
-
-var (
-	// REJECT errors - peer should be penalized
-	errHeaderEmptyCommitments       = errors.New("header has no kzg commitments")
-	errHeaderParentInvalid          = errors.New("header parent invalid")
-	errHeaderSlotNotAfterParent     = errors.New("header slot not after parent")
-	errHeaderNotFinalizedDescendant = errors.New("header not finalized descendant")
-	errHeaderInvalidInclusionProof  = errors.New("invalid inclusion proof")
-	errHeaderInvalidSignature       = errors.New("invalid proposer signature")
-	errHeaderUnexpectedProposer     = errors.New("unexpected proposer index")
-
-	// IGNORE errors - don't penalize peer
-	errHeaderNil               = errors.New("nil header")
-	errHeaderFromFuture        = errors.New("header is from future slot")
-	errHeaderNotAboveFinalized = errors.New("header slot not above finalized")
-	errHeaderParentNotSeen     = errors.New("header parent not seen")
-)
-
-// validatePartialDataColumnHeader validates a PartialDataColumnHeader per the consensus spec.
-// Returns (reject, err) where reject=true means the peer should be penalized.
-// TODO: we should consolidate this with the existing DataColumn validation pipeline.
-func (s *Service) validatePartialDataColumnHeader(ctx context.Context, header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
-	if header == nil || header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
-		return false, errHeaderNil // IGNORE
-	}
-
-	blockHeader := header.SignedBlockHeader.Header
-	headerSlot := blockHeader.Slot
-	parentRoot := bytesutil.ToBytes32(blockHeader.ParentRoot)
-
-	// [REJECT] kzg_commitments list is non-empty
-	if len(header.KzgCommitments) == 0 {
-		return true, errHeaderEmptyCommitments
-	}
-
-	// [IGNORE] Not from future slot (with MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
-	currentSlot := s.cfg.clock.CurrentSlot()
-	if headerSlot > currentSlot {
-		maxDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
-		slotStart, err := s.cfg.clock.SlotStart(headerSlot)
-		if err != nil {
-			return false, err
-		}
-		if s.cfg.clock.Now().Before(slotStart.Add(-maxDisparity)) {
-			return false, errHeaderFromFuture // IGNORE
-		}
-	}
-
-	// [IGNORE] Slot above finalized
-	finalizedCheckpoint := s.cfg.chain.FinalizedCheckpt()
-	startSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
-	if err != nil {
-		return false, err
-	}
-	if headerSlot <= startSlot {
-		return false, errHeaderNotAboveFinalized // IGNORE
-	}
-
-	// [IGNORE] Parent has been seen
-	if !s.cfg.chain.HasBlock(ctx, parentRoot) {
-		return false, errHeaderParentNotSeen // IGNORE
-	}
-
-	// [REJECT] Parent passes validation (not a bad block)
-	if s.hasBadBlock(parentRoot) {
-		return true, errHeaderParentInvalid
-	}
-
-	// [REJECT] Header slot > parent slot
-	parentSlot, err := s.cfg.chain.RecentBlockSlot(parentRoot)
-	if err != nil {
-		return false, errors.Wrap(err, "get parent slot")
-	}
-	if headerSlot <= parentSlot {
-		return true, errHeaderSlotNotAfterParent
-	}
-
-	// [REJECT] Finalized checkpoint is ancestor (parent is in forkchoice)
-	if !s.cfg.chain.InForkchoice(parentRoot) {
-		return true, errHeaderNotFinalizedDescendant
-	}
-
-	// [REJECT] Inclusion proof valid
-	if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
-		return true, errHeaderInvalidInclusionProof
-	}
-
-	// [REJECT] Valid proposer signature
-	parentState, err := s.cfg.stateGen.StateByRoot(ctx, parentRoot)
-	if err != nil {
-		return false, errors.Wrap(err, "get parent state")
-	}
-
-	proposerIdx := blockHeader.ProposerIndex
-	proposer, err := parentState.ValidatorAtIndex(proposerIdx)
-	if err != nil {
-		return false, errors.Wrap(err, "get proposer")
-	}
-
-	domain, err := signing.Domain(
-		parentState.Fork(),
-		slots.ToEpoch(headerSlot),
-		params.BeaconConfig().DomainBeaconProposer,
-		parentState.GenesisValidatorsRoot(),
-	)
-	if err != nil {
-		return false, errors.Wrap(err, "get domain")
-	}
-
-	if err := signing.VerifyBlockHeaderSigningRoot(
-		blockHeader,
-		proposer.PublicKey,
-		header.SignedBlockHeader.Signature,
-		domain,
-	); err != nil {
-		return true, errHeaderInvalidSignature
-	}
-
-	// [REJECT] Expected proposer for slot
-	expectedProposer, err := helpers.BeaconProposerIndexAtSlot(ctx, parentState, headerSlot)
-	if err != nil {
-		return false, errors.Wrap(err, "compute expected proposer")
-	}
-	if expectedProposer != proposerIdx {
-		return true, errHeaderUnexpectedProposer
-	}
-
-	return false, nil // Valid header
-}
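The deleted validator returned a (reject, err) pair encoding the spec's [REJECT] and [IGNORE] outcomes. In libp2p gossipsub terms, that pair maps onto the three-way validation result; a sketch of the mapping (the function name is illustrative, not Prysm code):

package validation

import pubsub "github.com/libp2p/go-libp2p-pubsub"

// toValidationResult maps the (reject, err) convention onto gossipsub results:
// REJECT penalizes the sending peer, IGNORE drops the message without penalty.
func toValidationResult(reject bool, err error) pubsub.ValidationResult {
	switch {
	case err == nil:
		return pubsub.ValidationAccept
	case reject:
		return pubsub.ValidationReject
	default:
		return pubsub.ValidationIgnore
	}
}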
@@ -78,21 +78,11 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
 // WARNING: The returned verifier is not thread-safe, and should not be used concurrently.
 func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColumn, reqs []Requirement) *RODataColumnsVerifier {
 	return &RODataColumnsVerifier{
 		sharedResources: ini.shared,
 		dataColumns:     roDataColumns,
 		results:         newResults(reqs...),
-		verifyDataColumnsCommitment: func(rc []blocks.RODataColumn) error {
-			if len(rc) == 0 {
-				return nil
-			}
-			var sizeHint int
-			if len(rc) > 0 {
-				sizeHint = len(rc[0].Column)
-			}
-			sizeHint *= len(rc)
-			return peerdas.VerifyDataColumnsCellsKZGProofs(sizeHint, blocks.RODataColumnsToCellProofBundles(rc))
-		},
-		stateByRoot: make(map[[fieldparams.RootLength]byte]state.BeaconState),
+		verifyDataColumnsCommitment: peerdas.VerifyDataColumnsSidecarKZGProofs,
+		stateByRoot:                 make(map[[fieldparams.RootLength]byte]state.BeaconState),
 	}
 }
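Note that verifyDataColumnsCommitment stays a function-valued field: production code now wires in the peerdas check directly, while tests can inject a stub. A minimal illustration of that seam, with stand-in types rather than Prysm's RODataColumnsVerifier:

package verification

import "errors"

// columnsVerifier holds its KZG check as a field so tests can swap it out.
type columnsVerifier struct {
	verify func(columns [][]byte) error
}

// newColumnsVerifier wires in a verification function, defaulting to a
// fail-closed stub when none is provided.
func newColumnsVerifier(verify func([][]byte) error) *columnsVerifier {
	if verify == nil {
		verify = func([][]byte) error { return errors.New("no verifier configured") }
	}
	return &columnsVerifier{verify: verify}
}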
changelog/bastin_fix-genlogs-gitignore-bug.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Fixed
+
+- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule.

changelog/bastin_fix-logging-issue.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Changed
+
+- Fixed the logging issue described in #16314.

changelog/bastin_get-version-v2.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Added
+
+- New beacon API endpoint `eth/v2/node/version`.
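The new endpoint can be queried like its v1 counterpart. A sketch assuming a local node serving the REST API on Prysm's default gateway port 3500; both the port and the response shape are assumptions based on the v1 convention, not confirmed by this diff:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:3500/eth/v2/node/version")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The v1 endpoint returns a shape like {"data":{"version":"Prysm/..."}}.
	fmt.Println(string(body))
}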
Some files were not shown because too many files have changed in this diff.