Compare commits


64 Commits

Author SHA1 Message Date
nisdas
ebcc2aeaf7 Eliminate per-validator heap allocations in epoch processing
Replace ReadOnlyValidator interface boxing with direct CompactValidator field
access throughout epoch processing, removing ~23% of all heap allocations
(12.95 GB cumulative per profiling session). Key changes:

- Add ForEachValidator and ApplyToEveryCompactValidator for zero-alloc
  validator iteration (read-only and read-write paths respectively)
- Add EffectiveBalanceAtIndex for direct effective balance reads without
  interface boxing
- Rewrite ProcessEffectiveBalanceUpdates (pre-Electra and Electra+) to
  use ApplyToEveryCompactValidator with two-pass collect-then-mutate
- Rewrite BaseRewardWithTotalBalance to use EffectiveBalanceAtIndex
- Convert InitializePrecomputeValidators, ProcessSlashingsPrecompute,
  ProcessRegistryUpdates and related helpers to use ForEachValidator
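Below is a minimal sketch of the callback-style iteration this enables, with hypothetical type and method shapes (the actual Prysm signatures may differ):

```go
// Sketch only: hypothetical types and signatures, not the actual Prysm API.
package epochsketch

// CompactValidator is a stand-in for the fixed-size, pointer-free validator type.
type CompactValidator struct {
	EffectiveBalance uint64
	// ... other fixed-size fields elided
}

// BeaconState is a stand-in holding validators by value, not behind interfaces.
type BeaconState struct {
	validators []CompactValidator
}

// ForEachValidator invokes fn for every validator without boxing each entry
// into an interface, so the iteration itself performs no per-validator heap
// allocation.
func (b *BeaconState) ForEachValidator(fn func(idx int, v *CompactValidator) error) error {
	for i := range b.validators {
		if err := fn(i, &b.validators[i]); err != nil {
			return err
		}
	}
	return nil
}

// Example read-only use: summing effective balances.
func totalEffectiveBalance(b *BeaconState) (uint64, error) {
	var total uint64
	err := b.ForEachValidator(func(_ int, v *CompactValidator) error {
		total += v.EffectiveBalance
		return nil
	})
	return total, err
}
```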

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-18 13:32:08 +01:00
Manu NALEPA
39b027c6c2 CoW merkleLayers via ref-counted sharedMerkleLayers
Replace deep-copy triple-nested loop in Copy() with ref-counted
sharedMerkleLayers. The layers are materialized only when
recomputeDirtyFields actually mutates, saving significant allocation
overhead during state copies.
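Roughly, the ref-counting idea looks like this (a sketch with hypothetical fields; the real `sharedMerkleLayers` in Prysm may differ):

```go
// Sketch only: hypothetical layout illustrating ref-counted copy-on-write layers.
package merklesketch

import "sync/atomic"

type sharedMerkleLayers struct {
	layers [][][]byte // e.g. layers[field][level] -> node hash bytes
	refs   *int32     // how many states currently share this backing array
}

// Copy is O(1): the new state shares the same backing layers.
func (s *sharedMerkleLayers) Copy() *sharedMerkleLayers {
	atomic.AddInt32(s.refs, 1)
	return &sharedMerkleLayers{layers: s.layers, refs: s.refs}
}

// materialize performs the (previously unconditional) deep copy, but only when
// a writer is about to mutate layers that another state still references.
func (s *sharedMerkleLayers) materialize() {
	if atomic.LoadInt32(s.refs) == 1 {
		return // sole owner: safe to mutate in place
	}
	cp := make([][][]byte, len(s.layers))
	for i, levels := range s.layers {
		cp[i] = make([][]byte, len(levels))
		for j, node := range levels {
			cp[i][j] = append([]byte(nil), node...)
		}
	}
	atomic.AddInt32(s.refs, -1)
	newRef := int32(1)
	s.layers, s.refs = cp, &newRef
}
```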

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-18 13:26:45 +01:00
Manu NALEPA
76d0b5706e Add changelog 2026-03-17 22:52:17 +01:00
Manu NALEPA
7df70bdffe getVerifyingState: Check in cache before replaying state. 2026-03-17 22:48:33 +01:00
Manu NALEPA
33143139d4 getVerifyingState: optimize state replay for concurrent data column verification. 2026-03-17 22:23:28 +01:00
Manu NALEPA
14d801a4da Add useful logs 2026-03-17 22:23:28 +01:00
Manu NALEPA
be4850ebba Add changelog 2026-03-17 17:24:55 +01:00
Manu NALEPA
cf75e1bc6b StateByRootIfCachedNoCopy: Check also in the epoch boundary state cache. 2026-03-17 17:24:55 +01:00
Manu NALEPA
47d4c915d3 Use state.ReadOnlyBeaconState instead of state.BeaconState when possible.
This avoids unnecessary state copies.
2026-03-17 17:24:52 +01:00
satushh
93f7214b32 Voluntary exit gloas (#16527)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

This PR implements the voluntary-exit-specific logic for the Gloas
builder, specifically:

-
[get_pending_balance_to_withdraw_for_builder](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-get_pending_balance_to_withdraw_for_builder)
-
[process_voluntary_exit](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_voluntary_exit)
-
[initiate_builder_exit](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-initiate_builder_exit)
-
[MIN_BUILDER_WITHDRAWABILITY_DELAY](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#time-parameters)

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2026-03-17 15:33:09 +00:00
Manu NALEPA
1bffcc84f4 Beacon state: Replace *ethpb.Validator by CompactValidator (#16535)
**What type of PR is this?**
Optimisation (@nisdas)

**What does this PR do? Why is it needed?**
The beacon state contains the list of all validators that have been at
least once deposited on the network.

| Network | Active | Total | Ratio |
|---|---|---|---|
| Hoodi | 1,224,855 | **1,294,521** | 94.6% |
| Mainnet | 948,883 | **2,229,313** | 42.6% |

Currently, the validators are stored in the beacon state like this:
```go
type BeaconState struct {
...
	validatorsMultiValue                *MultiValueValidators
...
}

type MultiValueValidators = multi_value_slice.Slice[*ethpb.Validator]

type Validator struct {
	state                      protoimpl.MessageState                                            `protogen:"open.v1"`
	PublicKey                  []byte                                                            `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty" spec-name:"pubkey" ssz-size:"48"`
	WithdrawalCredentials      []byte                                                            `protobuf:"bytes,2,opt,name=withdrawal_credentials,json=withdrawalCredentials,proto3" json:"withdrawal_credentials,omitempty" ssz-size:"32"`
	EffectiveBalance           uint64                                                            `protobuf:"varint,3,opt,name=effective_balance,json=effectiveBalance,proto3" json:"effective_balance,omitempty"`
	Slashed                    bool                                                              `protobuf:"varint,4,opt,name=slashed,proto3" json:"slashed,omitempty"`
	ActivationEligibilityEpoch github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch `protobuf:"varint,5,opt,name=activation_eligibility_epoch,json=activationEligibilityEpoch,proto3" json:"activation_eligibility_epoch,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"`
	ActivationEpoch            github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch `protobuf:"varint,6,opt,name=activation_epoch,json=activationEpoch,proto3" json:"activation_epoch,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"`
	ExitEpoch                  github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch `protobuf:"varint,7,opt,name=exit_epoch,json=exitEpoch,proto3" json:"exit_epoch,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"`
	WithdrawableEpoch          github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch `protobuf:"varint,8,opt,name=withdrawable_epoch,json=withdrawableEpoch,proto3" json:"withdrawable_epoch,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Epoch"`
	unknownFields              protoimpl.UnknownFields
	sizeCache                  protoimpl.SizeCache
}
```

Some fields (`state`, `unknownFields`, `sizeCache`) exist only for
protobuf and are not used by the beacon node.
Other fields (`PublicKey`, `WithdrawalCredentials`) are stored as
slices, which costs an extra slice header (`ptr+len+cap`) per field,
even though their sizes are fixed and known in advance, so they could be
stored as arrays.

We define a new custom type called `CompactValidator`, which is a
fixed-size, pointer-free representation of a validator:

```go
type CompactValidator struct {
	PublicKey                  [fieldparams.BLSPubkeyLength]byte // 48 bytes
	WithdrawalCredentials      [32]byte                          // 32 bytes
	EffectiveBalance           uint64                            // 8 bytes
	Slashed                    bool                              // 1 byte
	ActivationEligibilityEpoch primitives.Epoch                  // 8 bytes
	ActivationEpoch            primitives.Epoch                  // 8 bytes
	ExitEpoch                  primitives.Epoch                  // 8 bytes
	WithdrawableEpoch          primitives.Epoch                  // 8 bytes
}
```

Here is the comparison, per validator, between storing a
`*ethpb.Validator` and a `CompactValidator`.

| Component | `*ethpb.Validator` | `CompactValidator` | Wasted |
|---|---|---|---|
| Pointer to struct | 8 B | 0 B | 8 B |
| `state` (protoimpl.MessageState) | 8 B | 0 B | 8 B |
| `PublicKey` slice header (ptr+len+cap) | 24 B | 0 B | 24 B |
| `PublicKey` backing array (heap) | 48 B | 48 B (inline) | 0 B |
| `PublicKey` malloc overhead | ~16 B | 0 B | ~16 B |
| `WithdrawalCredentials` slice header | 24 B | 0 B | 24 B |
| `WithdrawalCredentials` backing array | 32 B | 32 B (inline) | 0 B |
| `WithdrawalCredentials` malloc overhead | ~16 B | 0 B | ~16 B |
| `EffectiveBalance` (uint64) | 8 B | 8 B | 0 B |
| `Slashed` (bool + padding) | 8 B | 8 B | 0 B |
| `ActivationEligibilityEpoch` | 8 B | 8 B | 0 B |
| `ActivationEpoch` | 8 B | 8 B | 0 B |
| `ExitEpoch` | 8 B | 8 B | 0 B |
| `WithdrawableEpoch` | 8 B | 8 B | 0 B |
| `unknownFields` ([]byte header) | 24 B | 0 B | 24 B |
| `sizeCache` (int32 + padding) | 8 B | 0 B | 8 B |
| Struct malloc overhead | ~16 B | 0 B | ~16 B |
| **Total** | **~264 B** | **128 B** | **~136 B (52%)** |

We waste `~136 B` per validator.
With a total of `2,229,313` validators (mainnet), it represents `~290
MB`.
Storing `CompactValidator` instead of `*ethpb.Validator` also reduces
the garbage collection pressure.
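As a quick sanity check of the inline layout (a sketch that assumes `primitives.Epoch` is a `uint64` alias), `unsafe.Sizeof` reproduces the 128-byte footprint:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Epoch is a local stand-in, assuming primitives.Epoch is a uint64 alias.
type Epoch uint64

// CompactValidator mirrors the field order shown above.
type CompactValidator struct {
	PublicKey                  [48]byte
	WithdrawalCredentials      [32]byte
	EffectiveBalance           uint64
	Slashed                    bool // padded to the next 8-byte boundary
	ActivationEligibilityEpoch Epoch
	ActivationEpoch            Epoch
	ExitEpoch                  Epoch
	WithdrawableEpoch          Epoch
}

func main() {
	// Prints 128: 48 + 32 + 8 + 1 (+7 padding) + 4*8, all inline, no pointers.
	fmt.Println(unsafe.Sizeof(CompactValidator{}))
}
```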

The following graph shows, for a Hoodi supernode (managing 200
validators):
1. The heap memory usage (`go_memstats_alloc_bytes`) without this PR
(`ref`) and with this PR (`current`)
2. The same as `1.`, but averaged over the last four hours.
3. The difference between the graphs in `2.` (the bigger, the better)

> [!NOTE]
> The improvement is, after stabilisation, **~300MB-450MB**,
representing **~8%-11%**.
>
> <img width="1028" height="917" alt="image"
src="https://github.com/user-attachments/assets/9179d9b1-bc50-4248-9f47-82a7646d58ab"
/>
>
> For some reason, the gain is higher than initially expected.

**For the reviewers:**
This PR should be reviewed commit by commit:
- Commits 1 and 2 are independent and unrelated to this PR.
- Commit 3 addresses an oversight in:
  - https://github.com/OffchainLabs/prysm/pull/16420
- Commit 4 implements the `CompactValidator`.
- Commit 5 uses the `CompactValidator`. The key change is:
```go
// MultiValueValidators is a multi-value slice of compact validators.
type MultiValueValidators = multi_value_slice.Slice[stateutil.CompactValidator]
```

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>
2026-03-17 13:38:58 +00:00
terence
7728ad4aa2 p2p: add signed proposer preferences gossip topic and verification (#16537)
This PR implements the `signed_proposer_preferences` gossip topic from
the Gloas spec. Proposers broadcast their `fee_recipient` and `gas_limit`
preferences for upcoming slots, which are cached for downstream bid
validation.

- Gossip topic registration, mapping, and subscriber for
`proposer_preferences`
- Verification: next-epoch check, valid proposal slot, signature
validation
- Slot-keyed `ProposerPreferencesCache` with pruning in the fork watcher
  - Protobuf `ProposerPreferences` and `SignedProposerPreferences`

  > **Note:** This is PR 1 of 3 in a stack. Please review in order:
  > 1. **This PR** — proposer preferences P2P
  > 2. Proposer preferences RPC endpoint
  > 3. Execution payload bid P2P
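The slot-keyed cache mentioned above could be shaped roughly like this (a sketch with hypothetical types; the actual `ProposerPreferencesCache` in Prysm may differ):

```go
// Sketch only: a minimal slot-keyed cache with pruning.
package prefsketch

import "sync"

type ProposerPreference struct {
	FeeRecipient [20]byte
	GasLimit     uint64
}

type ProposerPreferencesCache struct {
	mu    sync.RWMutex
	prefs map[uint64]map[uint64]ProposerPreference // slot -> proposer index -> preference
}

func NewProposerPreferencesCache() *ProposerPreferencesCache {
	return &ProposerPreferencesCache{prefs: make(map[uint64]map[uint64]ProposerPreference)}
}

// Set records a proposer's preference for an upcoming slot.
func (c *ProposerPreferencesCache) Set(slot, proposerIdx uint64, p ProposerPreference) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.prefs[slot] == nil {
		c.prefs[slot] = make(map[uint64]ProposerPreference)
	}
	c.prefs[slot][proposerIdx] = p
}

// Get is used during bid validation to check the cached preference.
func (c *ProposerPreferencesCache) Get(slot, proposerIdx uint64) (ProposerPreference, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	p, ok := c.prefs[slot][proposerIdx]
	return p, ok
}

// Prune drops entries at or below the given slot, e.g. from the fork watcher.
func (c *ProposerPreferencesCache) Prune(slot uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for s := range c.prefs {
		if s <= slot {
			delete(c.prefs, s)
		}
	}
}
```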
2026-03-17 00:12:37 +00:00
terence
1362654669 proposer: use correct access root to get pre state (#16496) 2026-03-16 18:06:31 +00:00
dkutzmarks-rgb
3cec3997f8 ci: add SBOM export workflow (#16531)
## Summary
- Adds CycloneDX SBOM generation via cdxgen and uploads to Dependency
Track
- Runs on push to the default branch and weekly (randomized schedule)

## Details
- SBOM format: CycloneDX 1.6 (required by Dependency Track)
- Generator: cdxgen v12.1.1 (Docker image)
- Runner: `ubuntu-latest`
- Skips SBOM generation if no commits in the last 7 days

---------

Co-authored-by: Preston Van Loon <preston@pvl.dev>
2026-03-16 15:46:59 +00:00
terence
1236519810 fix: guard against fork boundary IsParentBlockFull false positive (#16510)
`UpgradeToGloas` initializes both `latestExecutionPayloadBid.BlockHash`
and `latestBlockHash` to the last Fulu EL hash. If the first Gloas block
(or any subsequent block in a run of empty-variant slots) has no bid,
`process_execution_payload_bid` never runs, `bid.Slot` stays `0`, and
the two fields remain equal, so `IsParentBlockFull()` returns `true`.

`saveHead` and `lateBlockTasks` both used this result to pick the EL
block hash as the state access key, but the state was only ever saved
under the beacon block root.
2026-03-16 14:55:40 +00:00
Manu NALEPA
36052ed1bb Reduce log noise by returning nil error when ignoring already-seen data column sidecars during gossip validation (#16536)
**What type of PR is this?**
Other

**What does this PR do? Why is it needed?**
Starting at:
- https://github.com/OffchainLabs/prysm/pull/16515

For a super node, the
> data column sidecar already seen

log accounts for almost **50%** of the whole debug log volume.

Previously, if a given data column sidecar had already been seen, the
message was ignored with `err == nil`. Since that PR, the message is
still ignored but with `err != nil`, so the corresponding error is
printed on the console.

This pull request sets the corresponding error back to `nil`. The
rationale is that receiving the same sidecar multiple times from
different peers is expected, normal behavior.
==> We don't need to treat this event as an error.

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-14 22:42:59 +00:00
terence
458d4ebe54 Use read only validator accessor in IsPayloadTimelyCommittee (#16530)
This PR replaces `ValidatorAtIndex` with `ValidatorAtIndexReadOnly` in
`IsPayloadTimelyCommittee` to avoid copying the entire validator on each
call.

CPU and heap profiling of ePBS devnet nodes shows `PayloadCommittee` →
`CopyValidator` as the single largest hotspot, consuming ~18-20% of CPU
time across all servers. `IsPayloadTimelyCommittee` calls
`ValidatorAtIndex` in a loop over candidate committee members, and each
call deep-copies the full validator struct (~1 KB) only to read
`EffectiveBalance`.

`ValidatorAtIndexReadOnly` returns a read-only reference to the
underlying validator, eliminating the copy. The only field accessed is
`EffectiveBalance()`, which is available on the read-only interface.
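For intuition, the shape of the change is roughly the following (a sketch using minimal stand-in interfaces, not Prysm's exact `state` package types):

```go
// Sketch only: stand-ins for the read-only state accessors.
package rosketch

type ReadOnlyValidator interface {
	EffectiveBalance() uint64
}

type ReadOnlyBeaconState interface {
	ValidatorAtIndexReadOnly(idx uint64) (ReadOnlyValidator, error)
}

// committeeBalance reads only EffectiveBalance per member; the read-only
// accessor returns a view, so no ~1 KB validator copy is made per call.
func committeeBalance(st ReadOnlyBeaconState, committee []uint64) (uint64, error) {
	var total uint64
	for _, idx := range committee {
		val, err := st.ValidatorAtIndexReadOnly(idx)
		if err != nil {
			return 0, err
		}
		total += val.EffectiveBalance()
	}
	return total, nil
}
```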
2026-03-13 20:23:10 +00:00
james-prysm
8680f3f8bb adding grpc endpoints for attester, proposer, sync duties, and ptc duties (#16416)

**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

~~DEPENDS ON https://github.com/OffchainLabs/prysm/pull/16402~~

This PR adds gRPC endpoints for attester, proposer, and sync duties; it
does not use the APIs yet.
A future PR will include a transition from the duties v2 endpoint to the
split duties endpoints for the fork.

TESTING

Both the GetDutiesV2 endpoint and the newly added ones
(GetAttesterDuties, GetProposerDutiesV2, GetSyncCommitteeDuties,
GetPTCDuties) use the same underlying helper in the core service duty
function, so all core duties should be the same. Note that the new gRPC
endpoints use validator indices instead of pubkeys to match the REST
API endpoints.

Using grpcurl (install with brew install grpcurl):
```
  # GetDutiesV2 - composite endpoint
  # Note: public_keys are base64-encoded BLS pubkeys
  grpcurl -plaintext -d '{
    "epoch": '$EPOCH',
    "public_keys": ["<base64_pubkey_1>", "<base64_pubkey_2>"]
  }' localhost:4000
  ethereum.eth.v1alpha1.BeaconNodeValidator/GetDutiesV2
```

```
  # GetAttesterDuties
  grpcurl -plaintext -d '{
    "epoch": '$EPOCH',
    "validator_indices": [0, 1, 2, 3, 4]
  }' localhost:4000
  ethereum.eth.v1alpha1.BeaconNodeValidator/GetAttesterDuties
```

```
  # GetProposerDutiesV2
  grpcurl -plaintext -d '{
    "epoch": '$EPOCH'
  }' localhost:4000
  ethereum.eth.v1alpha1.BeaconNodeValidator/GetProposerDutiesV2
```

```
  # GetSyncCommitteeDuties
  grpcurl -plaintext -d '{
    "epoch": '$EPOCH',
    "validator_indices": [0, 1, 2, 3, 4]
  }' localhost:4000
  ethereum.eth.v1alpha1.BeaconNodeValidator/GetSyncCommitteeDuties
```

```
  # GetPTCDuties (Gloas+ only)
  grpcurl -plaintext -d '{
    "epoch": '$EPOCH',
    "validator_indices": [0, 1, 2, 3, 4]
  }' localhost:4000
  ethereum.eth.v1alpha1.BeaconNodeValidator/GetPTCDuties
```

  Verification Checklist

  1. Attester duty data match: For each validator in
  GetDutiesV2.CurrentEpochDuties, confirm attester_slot,
  committee_index, committee_length, committees_at_slot,
  validator_committee_index match the corresponding AttesterDuty
  from GetAttesterDuties

  2. Proposer duty data match: For each validator with non-empty
  proposer_slots in GetDutiesV2, confirm those slots appear in
  GetProposerDutiesV2 response for that validator index
  3. Sync committee flag match: For each validator where
  is_sync_committee=true in GetDutiesV2, confirm that validator
  appears in GetSyncCommitteeDuties response

  4. PTC duty data match: For each validator with non-empty
  ptc_slots in GetDutiesV2, confirm those slots appear in
  GetPTCDuties response for that validator index

  5. Dependent root — attester:
  GetDutiesV2.PreviousDutyDependentRoot ==
  GetAttesterDuties.dependent_root (for the same epoch)

  6. Dependent root — proposer pre-Fulu:
  GetDutiesV2.CurrentDutyDependentRoot ==
  GetProposerDutiesV2.dependent_root

7. Dependent root — proposer post-Fulu (DutiesV2 still has a bug: it
doesn't take the proposer lookahead into account):
  GetDutiesV2.CurrentDutyDependentRoot ≠
  GetProposerDutiesV2.dependent_root. The V2 value should match
  GetAttesterDuties.dependent_root instead (both use (E-1)_start - 1)
  
  8. Dependent root — PTC: GetPTCDuties.dependent_root ==
  GetAttesterDuties.dependent_root (both use
  AttestationDependentRoot)

  9. Next epoch: Repeat checks 1-4 using
  GetDutiesV2.NextEpochDuties vs individual endpoints queried
  with epoch+1

  10. Edge cases: Epoch 0, epoch 1, sync committee period
  boundary, pre-Gloas epoch for PTC (should error)


**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-13 19:36:21 +00:00
terence
416c49e6d5 metrics: add initial gloas metric (#16519)
This PR adds an initial gloas metric

**Forkchoice**
- Add `forkchoice_payload_inserted_count`
- Add `forkchoice_payload_empty_node_count`
- Add `forkchoice_payload_full_node_count`
- Add `forkchoice_ptc_vote_count`

These metrics show how forkchoice evolves under Gloas:
- how many payload nodes are inserted
- how many currently tracked nodes represent empty payloads vs full
payloads
- how many PTC votes forkchoice receives

**Blockchain**
- Add `beacon_execution_payload_envelope_valid_total`
- Add `beacon_execution_payload_envelope_invalid_total`
- Add `beacon_execution_payload_envelope_processing_duration_seconds`
- Add `beacon_late_payload_task_triggered_total`

These metrics track the beacon node’s envelope handling:
- whether received execution payload envelopes are accepted or rejected
- how long envelope processing takes end-to-end
- how often the late payload task fires during the slot

**Sync**
- Add `sync_payload_attestation_arrival_delay_seconds`
- Add `sync_execution_payload_envelope_arrival_delay_seconds`
- Add `sync_payload_envelope_by_range_served_total`
- Add `sync_payload_envelope_by_root_served_total`
- Add `gloas_execution_payload_envelopes_rpc_requests_total{rpc,result}`

These metrics focus on network timing and RPC serving:
- how long after slot start payload attestations and payload envelopes
arrive over gossip
- how often payload envelope RPC requests are served
- how payload-envelope RPC requests break down by method and result

For gossip validation, only arrival delay remains:
- payload attestation
- execution payload envelope
- data column sidecar continues to use the existing generic arrival
metric

Custom sync-side gossip result counters were removed because libp2p
already accounts for validation outcomes.

**Payload Attestation Pool**
- Add `payload_attestation_pool_size`

This metric shows the current size of the payload attestation pool,
which is useful for seeing whether attestations are being accumulated or
drained as expected.

**Validator RPC**
- Add `gloas_payload_id_cache_total{result}`

This metric shows Gloas proposer payload ID cache behavior:
- cache hits
- cache misses

That helps explain whether proposers are reusing cached EL payload IDs
or falling back to fresh forkchoice updates.

**Validator Client**
- Add `validator_payload_attestation_submission_total{result}`
- Add `validator_self_build_envelope_submission_total{result}`

These metrics show validator-side Gloas behavior:
- whether payload attestation submissions succeed or fail
- whether self-build envelope submissions succeed or fail

**Core Gloas**
- Add `gloas_builder_pending_payments_processed_total`
- Add `gloas_builder_deposits_processed_total`

These metrics show builder-economics state transitions:
- how many pending builder payments are promoted into pending
withdrawals
- how many builder-related deposit requests are processed

**State Native**
- Add `gloas_execution_payload_availability_ratio`
- Add `gloas_builder_pending_withdrawals_count`
- Add `gloas_builder_pending_withdrawals_gwei`
- Add `gloas_payload_expected_withdrawals_count`
- Add `gloas_active_builders_count`
- Add `gloas_active_builders_balance_gwei`

These metrics expose the current Gloas state snapshot:
- how much of the payload availability window is marked available
- how many builder pending withdrawals exist and their total value
- how many expected withdrawals are cached for the next payload
- how many builders are currently active and their aggregate balance

These are emitted from `RecordStateMetrics()`
2026-03-13 16:07:01 +00:00
terence
6605dfbd50 forkchoice: fix forkchoice balance underflow when att slot change (#16520)
In ePBS, `resolveVoteNode` routes validator balances to two different
accumulators based on the attestation slot.

When a validator re-attests for the **same block** in a **new epoch**
(assigned to a different slot), the root and payload status are
unchanged. The trigger condition:

  ```go
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance ||
vote.currentPayloadStatus != vote.nextPayloadStatus {
```
  ...does not fire, so the balance is never moved. But the vote rotation silently updates currentSlot to the new value. On the next vote change, the subtraction targets the wrong accumulator (which has balance=0), causing the underflow.

  Examples:
  1. Epoch 2: Validator attests at slot 95 for block B (slot 95).
  pending = (95 == 95) = true → 32 ETH added to `Node.balance`
  2. Epoch 3: The validator re-attests at slot 96 for the same block B.
  `Root/payloadStatus` went from pending to empty → not reprocessed, and
  currentSlot rotated to 96.
  3. Epoch 4: The validator attests for block C. The subtraction targets
  B, which is no longer pending but has zero balance, triggering the
  underflow.
2026-03-13 15:32:25 +00:00
Manu NALEPA
84993fdd68 Fix TestProcessPendingDepositsMultiplesSameDeposits to properly test that duplicate pending deposits for the same key top up a single validator instead of creating duplicates. (#16529)
**What type of PR is this?**
Bug fix (in tests)

**What does this PR do? Why is it needed?**
Fix `TestProcessPendingDepositsMultiplesSameDeposits` to properly test
that duplicate pending deposits for the same key top up a single
validator instead of creating duplicates.

Previously, this test used 2 validators with the same pubkey.

**Acknowledgements**
- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-13 15:01:19 +00:00
james-prysm
1e916418f2 changed /eth/v1/beacon/execution_payload_envelope/{block_root} to /eth/v1/beacon/execution_payload_envelope/{block_id} defined in beacon apis (#16521)
**What type of PR is this?**

 Bug fix

**What does this PR do? Why is it needed?**

Missed this in review: we should have
/eth/v1/beacon/execution_payload_envelope/{block_root} renamed to
/eth/v1/beacon/execution_payload_envelope/{block_id}, as defined in the
beacon APIs.


https://github.com/ethereum/beacon-APIs/blob/master/apis/beacon/execution_payload/envelope_get.yaml

missed in https://github.com/OffchainLabs/prysm/pull/16441

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-13 14:12:42 +00:00
terence
8d5d584cf8 sync,verification: add Gloas data column gossip validation path (#16515)
This PR adds a dedicated Gloas `data_column_sidecar` gossip validation
path and aligns it with the EIP-7732 spec.

The validation flow is now split cleanly:
- Fulu stays in the existing `validate_data_column` path
- Gloas moves into `validate_data_column_gloas`
- Gloas-specific checks live in a dedicated `GloasDataColumnVerifier`

**What Changed**
- Add a separate Gloas data column gossip validator in sync
- Add `GloasDataColumnVerifier` in `verification`
- Validate Gloas sidecars against
`block.body.signed_execution_payload_bid.message.blob_kzg_commitments`
- Refactor KZG proof verification to accept commitments explicitly
instead of rewriting the sidecar
- Use Gloas dedupe semantics: `(beacon_block_root, index)`
- Add spec-mapped comments next to the Gloas checks
- Add a TODO for deferred queue/revalidation when the block has not yet
been seen

**Spec Mapping**

The Gloas path now follows this order:
1. `IGNORE` if the block for `sidecar.beacon_block_root` has not been
seen
2. `REJECT` if `sidecar.slot != block.slot`
3. `REJECT` if `verify_data_column_sidecar(sidecar,
bid.blob_kzg_commitments)` fails
4. `REJECT` if the sidecar is on the wrong subnet
5. `REJECT` if `verify_data_column_sidecar_kzg_proofs(sidecar,
bid.blob_kzg_commitments)` fails
6. `IGNORE` if `(sidecar.beacon_block_root, sidecar.index)` has already
been seen

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2026-03-12 22:50:26 +00:00
satushh
598509ffa8 Remove next epoch lookahead from PTC duties (#16517)
**What does this PR do? Why is it needed?**

Implemented the spec change:
https://github.com/ethereum/beacon-APIs/pull/586

(Removed next epoch lookahead from PTC duties)
2026-03-12 16:43:32 +00:00
terence
0fbd643c02 forkchoice: fix head change on every attestation tick pre gloas (#16508)
`store.go` creates a `fullNodeByRoot` entry with `full=true` for all
fulu blocks because pre-Gloas blocks embed their EL payload inline and
need optimistic validation tracking. `FullHead` was surfacing this
internal `full=true` to callers without distinguishing whether the node
was actually post-gloas.

`UpdateHead` called `FullHead`, which returned `full=true` for fulu
blocks. Meanwhile `saveHead` always stored `s.head.full=false` for
pre-gloas states. The mismatch caused `isNewHead` to fire on every
ticker interval.

I liked the option of fixing this in `FullHead`; it's a simple change.
The con is that `CanonicalNodeAtSlot` is a different path that also
returns `pn.full` for pre-gloas. It's already call-site guarded by
`GloasForkEpoch` so it's fine, but the internal inconsistency remains.

**Option 2** is Fix in `saveHead` for fulu
```go
if headState.Version() >= version.Gloas {
} else {
    full = true
}
```
This aligns `s.head.full` with what `FullHead` returns

**Option 3** is Fix in `isNewHead` (ignore full pre-gloas)
```go
    postGloas := slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().GloasForkEpoch
    fullChanged := postGloas && full != currentFull
    return r != currentHeadRoot || fullChanged || r == [32]byte{}
```

**Option 4** is Fix in `store.go` to not expose full=true pre gloas,
which change `choosePayloadContent` to return the empty node for pre
gloas. I haven't analyzed this one deeply to understand its downstream
effect
```go
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
    fn := s.fullNodeByRoot[n.root]
    en := s.emptyNodeByRoot[n.root]
    if fn == nil {
        return en
    }
    if fn.full && slots.ToEpoch(n.slot) < params.BeaconConfig().GloasForkEpoch {
        return en
    }
    ...
```
2026-03-11 23:29:31 +00:00
james-prysm
0e4f3231d2 adding payload_attestation_message event (#16506)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

Adds the payload_attestation_message event, triggered via gRPC and on
gossip message receipt, introduced in
https://github.com/ethereum/beacon-APIs/pull/552

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-11 16:24:53 +00:00
james-prysm
b17f2752ab adding payload attestation apis connection from validator client (#16504)
This PR replaces the gRPC connection stub with actual calls to the API endpoints.
2026-03-10 18:52:50 +00:00
Potuz
932e5eb7d8 Use execution block hash as state access key post-Gloas (#16459)
Post-Gloas, SaveState and the next-slot cache are keyed by execution
block hash as well as beacon block root. Thread an accessRoot through
getStateAndBlock (for StateByRoot) and getPayloadAttribute (for
ProcessSlotsUsingNextSlotCache) so each caller can supply the right
key.

Add FullHead to forkchoice: it returns the head root, the block hash
of the nearest full payload (walking up to an ancestor when the head
is empty), and whether the head itself is full. This gives callers
enough information to derive the correct accessRoot.

Rework UpdateHead to call FullHead and branch on post-Gloas: the
Gloas path computes accessRoot from the full block hash and sends FCU
via notifyForkchoiceUpdateGloas; the legacy path keeps the existing
forkchoiceUpdateWithExecution flow. saveHead and pruneAttsFromPool
now use local variables instead of the removed shared fcuArgs.

Flatten the else branch in lateBlockTasks so the Gloas path returns
early and the pre-Gloas path runs at the top indentation level.

In postPayloadHeadUpdate pass the envelope's block hash as accessRoot
since the state was just saved under that key.
2026-03-10 17:33:18 +00:00
Jun Song
de233438f1 Remove unnecessary memory allocation while encoding in JSON at GetBeaconStateV2 (#16485)
**What type of PR is this?**

> Other: Optimization

**What does this PR do? Why is it needed?**

In `GetBeaconStateV2`, an extra `json.Marshal` call is used for type
checking, which allocates the `[]byte` **twice**, so the total memory
usage is increased by the size of the `BeaconState`. Currently, the
mainnet `BeaconState` exceeds 250 MB, so I think this can be a good
optimization for nodes that serve `BeaconState` periodically.
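Illustratively (a sketch of the pattern, not the actual handler code), the state can be marshaled once and the resulting bytes reused, instead of encoding it a first time just to check encodability and a second time for the response:

```go
// Sketch only: generic handler shape.
package jsonsketch

import (
	"encoding/json"
	"fmt"
)

// encodeState marshals the (very large) state exactly once; the same bytes
// serve both as the encodability check and as the response body.
func encodeState(st any) ([]byte, error) {
	out, err := json.Marshal(st)
	if err != nil {
		return nil, fmt.Errorf("state is not JSON-encodable: %w", err)
	}
	return out, nil
}
```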

**Which issues(s) does this PR fix?**

N/A

**Other notes for review**

I used [this benchmark
script](https://gist.github.com/syjn99/499eaae60e37447297bb568280ab2b64).

Before:

```
Running tool: /opt/homebrew/bin/go test -test.fullpath=true -benchmem -run=^$ -bench ^BenchmarkGetBeaconStateV2$ github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/debug

goos: darwin
goarch: arm64
pkg: github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/debug
cpu: Apple M4 Pro
BenchmarkGetBeaconStateV2/validators_64-12         	      49	  24245383 ns/op	42510257 B/op	  250511 allocs/op
BenchmarkGetBeaconStateV2/validators_100000-12     	       5	 225380608 ns/op	347510451 B/op	 1849520 allocs/op
BenchmarkGetBeaconStateV2/validators_1000000-12    	       1	2437710417 ns/op	2901637784 B/op	16249530 allocs/op
PASS
ok  	github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/debug	7.431s
```

After:
```
Running tool: /opt/homebrew/bin/go test -test.fullpath=true -benchmem -run=^$ -bench ^BenchmarkGetBeaconStateV2$ github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/debug

goos: darwin
goarch: arm64
pkg: github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/debug
cpu: Apple M4 Pro
BenchmarkGetBeaconStateV2/validators_64-12         	     129	   8947429 ns/op	34877098 B/op	  250497 allocs/op
BenchmarkGetBeaconStateV2/validators_100000-12     	      12	  90652486 ns/op	254957336 B/op	 1849493 allocs/op
BenchmarkGetBeaconStateV2/validators_1000000-12    	       1	1182659125 ns/op	2475831984 B/op	16249524 allocs/op
PASS
ok  	github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/debug	5.816s
```

Summary:
```
validators_64 ->
Time: 24.2ms → 8.9ms (2.71x faster, −63%)
Memory: 42.5MB → 34.9MB (−18%)

validators_100,000 ->
Time: 225ms → 91ms (2.49x faster, −60%)
Memory: 347.5MB → 255.0MB (−26.6%)

validators_1,000,000 ->
Time: 2,438ms → 1,183ms (2.06x faster, −51%)
Memory: 2,901MB → 2,476MB (−14.7%)
```

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2026-03-10 15:37:04 +00:00
Potuz
e35f6c351a Handle missing Payloads (#16460)
In the event of a late missing Payload, we add a helper that updates the
beacon block post state in the NSC as well as handling the boundary
caches and calling FCU if we are proposing next slot.
2026-03-10 13:59:50 +00:00
terence
4da5ed072c proposer: add payload attestations packing (#16493)
This PR adds packing of payload attestations into a beacon block. It
filters payload attestations using slot and beacon root checks.
2026-03-10 02:27:03 +00:00
james-prysm
dec4b43b3e fix to attestation pool to only consume on insert (#16503)
**What type of PR is this?**
Bug fix

**What does this PR do? Why is it needed?**

Fixes a bug where the payload attestation pool consumed entries on
reads, when we should only prune on insert.

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-10 00:02:52 +00:00
james-prysm
17ea45a011 adding the duties stores for validator duties processing (#16479)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

This PR refactors the way we store different validator duties into a
duties store for easier splitting of tasks and in a future pr processing
duties for split endpoints. This PR will reduce the number of changes
when we start calling the different endpoints introduced in
https://github.com/OffchainLabs/prysm/pull/16416

pr is part of https://github.com/OffchainLabs/prysm/pull/16421

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2026-03-09 21:44:14 +00:00
terence
1934afac73 sync: wire payload attestations into pending pool (#16492)
This PR wires payload attestations that arrive over the sync gossip
pipeline into the pending pool.
2026-03-09 18:32:10 +00:00
james-prysm
d0c9a31657 Validator client ptc actions (#16407)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

This PR introduces PTC attestations, including getting the attestation
data, signing, and submitting. The getting and submitting are stubbed
out for now as the APIs are not implemented/merged yet.

Tested in Kurtosis to verify no regressions.

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-09 16:39:47 +00:00
terence
66c70200ee duties: wire PTC slot assignments into validator duty pipeline (#16489)
This PR adds end-to-end PTC support for validator duties by adding PTC
duty support in duties v2:
  - introduced repeated PTC slot assignments per validator
  - computed assignments by scanning epoch slots and checking
`gloas.PayloadCommittee`
  - propagated PTC slots through server/adapter duty responses
2026-03-09 14:20:13 +00:00
Bastin
928a874e4a add log.go files to runtime (#16491)
**What does this PR do? Why is it needed?**
This PR adds the `runtime` package and its subpackages (except
`runtime/logging`) to the log generation process, which means they get
an additional `log.go` file with the package path as a field.

**Other notes for review**
- `runtime/logging` is an exception because adding it to the log gen
process messes with the internals of the package, since it handles
the logging formats, etc.

- To test this, you can look at the logs emitted from these packages
before and after this PR. They will have a `prefix` field before this
PR, and none after. The most obvious one is the runtime
(`registry`) package itself, which usually emits a log at the start and
end of the beacon node.

This is part of my goal to remove all `prefix` logs and move them to
the new `package_path` format.
2026-03-08 21:09:40 +00:00
Bastin
ed8a3351aa support for calling debug/states/{id} with pre-payload roots (#16466)
In `rpc/lookup/stater`, when trying to fetch a state using a state root,
we only look in `state.StateRoots`.
In Gloas that would only account for post-payload roots.

This PR adds a non-optimal way to support fetching states using
pre-payload state roots (Dora does this).

I'm running a local devnet so I can see how long it takes for the stater
to respond in the worst case, but I think it should be fine for the
beacon API.
2026-03-08 19:51:44 +00:00
terence
5b95d11c5e rpc: allow execution payload envelope lookup by block ID (#16495)
fixes #[16494](https://github.com/OffchainLabs/prysm/issues/16494)
2026-03-08 14:00:52 +00:00
terence
7a4bea0e44 rpc/validator: implement PayloadAttestationData and SubmitPayloadAttestation gRPC endpoints (#16487)
- This PR adds the payload attestation prototype flow. 
- On the proto side it introduces `PayloadAttestationDataRequest` and
two RPC methods on `BeaconNodeValidator`: `PayloadAttestationData` and
`SubmitPayloadAttestation`.
- The `ForkchoiceFetcher` interface is extended with
`HighestReceivedBlockRoot() [32]byte`, which returns the beacon block
root at the highest received slot and is used to determine which block
PTC members attest to, and `HasFullNode([32]byte) bool`, which indicates
whether a full payload node exists for a given root and determines the
`PayloadPresent` field in the response.
- The RPC server implementation lives in
`beacon-chain/rpc/prysm/v1alpha1/validator/payload_attestation.go`.
`PayloadAttestationData` validates the current slot, checks sync status,
and returns attestation data using the forkchoice helpers, while
`SubmitPayloadAttestation` validates the request, broadcasts it over
P2P, applies it locally through `ReceivePayloadAttestationMessage`, and
inserts it into the payload attestation pool. Wiring adds
`PayloadAttestationPool` and `PayloadAttestationReceiver` to `Server`,
`rpc.Config`, and `BeaconNode`, with the pool instantiated in
`node.New()`.
- Both RPCs are marked `deprecated = true` since this prototype will
eventually move to the beacon REST API.
2026-03-07 04:57:28 +00:00
terence
e899003973 operations/payloadattestation: add PTC attestation pool (#16486)
Introduces the `payloadattestation` pool package, a thread-safe store
for aggregating `PayloadAttestationMessage`s from PTC members before
they are included in blocks by the `slot N+1` proposer.

PTC validators broadcast `PayloadAttestationMessage`s during slot N
attesting whether the execution payload arrived on time
(`PayloadPresent`) and whether blob data is available
(`BlobDataAvailable`). The slot N+1 proposer collects these and includes
aggregated `PayloadAttestation`s in the block body.

The pool:
- Keys entries by struct — one entry per distinct `(block_root, slot,
payload_present, blob_data_available)` tuple
- Aggregates BLS signatures and bit vectors as messages arrive
- Exposes `PendingPayloadAttestations(slot)` for proposer block
construction; entries at that slot and lower are automatically pruned
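Conceptually, the pool keys aggregates by the attestation data tuple; a minimal sketch (hypothetical types, not the Prysm implementation):

```go
// Sketch only: hypothetical types standing in for the real protobuf messages.
package ptcpoolsketch

import "sync"

type payloadAttKey struct {
	BlockRoot         [32]byte
	Slot              uint64
	PayloadPresent    bool
	BlobDataAvailable bool
}

type payloadAttAggregate struct {
	AggregationBits []byte   // one bit per PTC member
	Signatures      [][]byte // collected BLS signatures, aggregated later
}

type Pool struct {
	mu      sync.Mutex
	entries map[payloadAttKey]*payloadAttAggregate
}

func NewPool() *Pool {
	return &Pool{entries: make(map[payloadAttKey]*payloadAttAggregate)}
}

// Add records one PTC member's message under its (root, slot, flags) tuple.
func (p *Pool) Add(key payloadAttKey, memberIdx uint64, sig []byte) {
	p.mu.Lock()
	defer p.mu.Unlock()
	agg, ok := p.entries[key]
	if !ok {
		agg = &payloadAttAggregate{AggregationBits: make([]byte, 64)} // assumes PTC size <= 512
		p.entries[key] = agg
	}
	agg.AggregationBits[memberIdx/8] |= 1 << (memberIdx % 8)
	agg.Signatures = append(agg.Signatures, sig)
}
```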
2026-03-06 22:50:07 +00:00
james-prysm
e751a74c64 gloas attestation (#16391)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

Adds basic changes for gloas attestations to have updated index value
based on payload availability.

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 19:44:21 +00:00
satushh
6826e77539 Use copy() instead of byte-by-byte loop in MarshalSSZ (#16483)
**What type of PR is this?**

Optimisation

**What does this PR do? Why is it needed?**

Use copy() instead of byte-by-byte loop in MarshalSSZ
Similar to https://github.com/OffchainLabs/prysm/pull/16222
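The change boils down to replacing a manual element-by-element loop with the built-in `copy`, e.g. (illustrative only, not the generated SSZ code itself):

```go
// writeField copies a fixed-size field into the output buffer.
func writeField(dst []byte, offset int, src []byte) {
	// Before: a byte-by-byte loop.
	//   for i := 0; i < len(src); i++ {
	//       dst[offset+i] = src[i]
	//   }

	// After: a single copy(), which the runtime implements as an optimized memmove.
	copy(dst[offset:], src)
}
```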
2026-03-06 18:01:55 +00:00
Jun Song
15c178ef0c Add Close() call for resources (http response, file descriptor) in e2e package (#16481)
**What type of PR is this?**

> Bug fix

**What does this PR do? Why is it needed?**

Some resources in e2e package didn't get closed after usage.

**Which issues(s) does this PR fix?**

N/A

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2026-03-06 17:39:25 +00:00
Jun Song
e115137591 Fix file descriptor leaks in CopyFile (#16480)
**What type of PR is this?**

> Bug fix

**What does this PR do? Why is it needed?**

Close the file descriptors for both the source and destination files in
the `CopyFile` function.
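A minimal sketch of the corrected shape, assuming a plain `io.Copy`-based helper (not necessarily the exact Prysm code):

```go
package filesketch

import (
	"io"
	"os"
)

// CopyFile copies src to dst and closes both descriptors on every path.
func CopyFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close() // previously leaked if a later step failed

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close() // destination descriptor is now closed as well

	if _, err := io.Copy(out, in); err != nil {
		return err
	}
	return out.Sync()
}
```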

**Which issues(s) does this PR fix?**

N/A

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 17:33:58 +00:00
Manu NALEPA
dc62271ebb Use InitializeFromProtoUnsafeXXX instead of InitializeFromProtoXXX when possible. (Nishant's optimization) (#16420)
**What type of PR is this?**
Optimisation

**What does this PR do? Why is it needed?**
Use `InitializeFromProtoUnsafeXXX` instead of `InitializeFromProtoXXX`
when possible. (@nisdas' change)
This change avoids unnecessary copies.

The difference between these two methods is that the latter
(`InitializeFromProtoXXX`) copies the input state. However, if the input
state is guaranteed not to be modified after the call (and, in
particular, if it is not used at all afterwards), then copying it is
unnecessary. ==> Using the "unsafe" method is fine in this case.
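Illustratively (fork-specific constructor names vary; Phase0 shown here as a sketch), the difference is only in who owns the input proto:

```go
// Sketch only: the two call shapes side by side.

// Safe variant: copies pbState internally, so the caller may keep using
// or mutating the proto afterwards.
st, err := state_native.InitializeFromProtoPhase0(pbState)

// Unsafe variant: takes ownership of pbState without copying. Only valid
// when pbState is never read or written again after this call.
st, err = state_native.InitializeFromProtoUnsafePhase0(pbState)
```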

**Other notes for review**
As a reviewer, you should ensure that when the unsafe method is used,
then the input state is never modified after the use of the function.

This PR is much easier to review directly in VSCode, because it displays
more context by default.

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-06 15:46:34 +00:00
Manu NALEPA
3ec505bc22 Periodically evict finalized states in checkpoint states cache (#16458)
**What type of PR is this?**
Bug fix

**What does this PR do? Why is it needed?**
This PR evicts finalized states in checkpoint states cache.

States are efficiently stored in caches, especially thanks to
multi-value slices. If one state takes 300 MB and two very similar
states are stored in a cache, then these two states could need only,
say, 310 MB of cache memory (instead of 300 MB x 2 = 600 MB).

**Before the commit creating the memory issue**, new states were
regularly stored in the `CheckpointStateCache`. This cache has 10 slots.
Once the cache is full, the oldest values (roughly speaking, since it's
an LRU cache) are pruned.

**After the commit creating the memory issue**, new states are quite
rarely inserted into this cache. For example, on one run, almost 5 hours
(!) were needed before the first value was evicted from this cache. This
means the cache contains multiple states that do not share many values
with the states in all the other caches/head.
==> A lot of fields stay in memory that are needed exclusively for the
(old) states only present in this cache.

The beacon node now evicts finalized states from the cache.
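The eviction itself can be pictured as a simple pass over the cache (a sketch with hypothetical fields, not Prysm's actual `CheckpointStateCache`):

```go
// Sketch only: hypothetical cache shape.
package cachesketch

import "sync"

type checkpointKey struct {
	Epoch uint64
	Root  [32]byte
}

type CheckpointStateCache struct {
	mu     sync.Mutex
	states map[checkpointKey]any // `any` stands in for the real beacon state type
}

// PruneFinalized drops every cached state at or before the finalized epoch,
// releasing the fields those old states were keeping alive exclusively.
func (c *CheckpointStateCache) PruneFinalized(finalizedEpoch uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for key := range c.states {
		if key.Epoch <= finalizedEpoch {
			delete(c.states, key)
		}
	}
}
```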

**Which issues(s) does this PR fix?**
- https://github.com/OffchainLabs/prysm/issues/16376

<img width="1022" height="914" alt="image"
src="https://github.com/user-attachments/assets/98886364-001a-48fc-a952-5c6a7e80bf88"
/>

**Acknowledgements**
- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-06 09:39:37 +00:00
Potuz
6be77c0194 Do not reject on blocks unless they are provably bad (#16471)
Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-06 02:20:11 +00:00
Potuz
03fa7042cb Dont fail seting fc (#16478)
Do not fail when setting up the forkchoice checkpoints. Balances will be
wrong, but they self-heal within 2 epochs.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-06 00:13:40 +00:00
terence
c620f29aab fix(sync): do not fail block processing on hot state DB cleanup error (#16473)
During initial sync, `checkSaveHotStateDB` can fail when it tries to
disable hot-state-saving mode and clean up previously saved states. If
one of those states has since become the finalized checkpoint,
`DeleteStates` returns `ErrDeleteJustifiedAndFinalized`, which is
correct behavior from the DB layer, but should not kill block
processing.
This was observed on epbs-devnet-0 at slot ~3742 where the error
cascaded as:
```
Skip processing Gloas blocks error=receive Gloas block at slot 3742: check save hot state db: cannot delete finalized block or state
```
The fix downgrades the error to a log, matching the pattern already used
by `prunePostBlockOperationPools` two lines above. In the long run, we
should remove this scheme of saving unfinalized states in the DB when we
don't finalize after some epochs; it was a workaround for the Medalla
incident.
2026-03-05 18:42:07 +00:00
terence
7a652d7ec6 fix(sync): preserve the full signed execution payload envelope (#16474)
fix(sync): preserve the full signed execution payload envelope in pubsub
validations
2026-03-05 15:24:23 +00:00
Potuz
b59a830dce Gloas/safer pending signature (#16470)
This PR hardens the pending payload structure in a few ways:

- It quickly ignores payloads before signature verification if we already
have many pending payloads.
- It ignores rather than rejects payloads for the self-build block if
they fail verification, because (as seen on devnet-zero) we may get
gossip payloads from unknown branches.
- It adds a counter to prevent being DoSed via the previous feature. Since
we are ignoring instead of rejecting those payloads, it only allows 3
such payloads. This counter is pruned on finalization.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-05 04:36:57 +00:00
Potuz
6999943c3d Gloas/late block tasks right fcu (#16454)
This PR changes how we update the next slot cache on late blocks. 

- When the latest saved state in the NSC agrees with the head root, then
we update this latest state because it will be useful. Otherwise we
instead update the headstate and put it as latest.
- This also keys the insertion in the NSC post-Gloas with the right
blockhash instead of blockroot if the headstate is based on full. This
exploits a dangerous feature of `IsParentBlockFull` documented in its
godoc.

This PR also updates the cache and the epoch boundary when we process
the payload.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 00:55:07 +00:00
Potuz
90064edd54 Do not request blocks that are being synced (#16468)
When adding a payload to the pending queue, do not send a batch request
if the block is already in forkchoice or being synced.

Similar when processing pending attestations.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-05 00:06:40 +00:00
james-prysm
393eb1e83c Reorganize duties functions (#16457)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

Just moving some functions around to reduce the duties split PR. Part of
https://github.com/OffchainLabs/prysm/pull/16421

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-03-04 20:47:22 +00:00
Potuz
d2bcf75c50 Gloas/pending payload dependent root (#16461)
Only verify the envelope's signature before putting it in the queue if
we are not self-building or the proposer is in the lookahead. Otherwise
just ignore it.

This can still cause a false rejection in a very edge case: a builder
that is chosen exactly after becoming active while we are syncing from
afar.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-04 18:40:11 +00:00
terence
89d3a6c66f Preserve canonical order in writeBlockBatchToStream (#16463)
Summary
- update `writeBlockBatchToStream` to preserve canonical slot order when
a batch contains both blinded and full blocks
- reconstruct blinded blocks before streaming and substitute them by
slot during canonical write pass
2026-03-04 18:17:25 +00:00
Bastin
bf2485eb71 gloas support for debug/beacon/state/ api (#16296)
**What does this PR do? Why is it needed?**
gloas support for debug/beacon/state/ api
2026-03-04 16:41:32 +00:00
terence
a88f60f1fa Apply gloas payload state mutations during stategen replay (#16464)
State replay in gloas needs to reflect post-payload state even when
replay input is a blinded envelope format. Without applying these
mutations during stategen replay, reconstructed states can diverge from
the canonical post-payload state. At the same time, not every block has
an envelope, so replay must remain robust when envelope data is absent

Summary
- apply Gloas execution payload envelope state mutations during stategen
replay when a blinded envelope is available
- extract reusable payload-mutation logic into
`ApplyExecutionPayloadStateMutations` and use it from both normal
payload processing and stategen replay
2026-03-04 16:26:04 +00:00
Potuz
33f899506f Gloas/request parent payload (#16418)
This PR adds sync paths to request the parent payload by RPC in the
event that we know the incoming block is based on full and we do not
have the payload.

It also adds an invalid payload cache and keeps track of some invalid
ones that we have seen on the blockchain package.
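
A simplified sketch of the request decision, with hypothetical maps standing in for forkchoice and the invalid payload cache:

```go
package main

import "fmt"

// payloadSync is a toy stand-in for the service state consulted when a block
// based on full arrives and its parent payload may be missing.
type payloadSync struct {
	havePayload map[[32]byte]bool // parent roots whose payload we already have
	invalid     map[[32]byte]bool // payload hashes already seen to be invalid
}

// needsParentPayloadRequest reports whether the parent payload should be
// requested over RPC: the incoming block builds on full, we do not have that
// payload yet, and it is not already known to be invalid.
func (p *payloadSync) needsParentPayloadRequest(parentRoot, parentHash [32]byte, basedOnFull bool) bool {
	if !basedOnFull || p.invalid[parentHash] {
		return false
	}
	return !p.havePayload[parentRoot]
}

func main() {
	p := &payloadSync{havePayload: map[[32]byte]bool{}, invalid: map[[32]byte]bool{}}
	fmt.Println(p.needsParentPayloadRequest([32]byte{1}, [32]byte{9}, true))  // true: payload missing
	fmt.Println(p.needsParentPayloadRequest([32]byte{1}, [32]byte{9}, false)) // false: builds on empty
}
```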

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-04 15:14:36 +00:00
Potuz
f7ead02e6e Make Dora Happy (#16441)
- This PR adds the execution_payload_envelope Beacon API endpoint
supporting both JSON and SSZ
- It adds the execution_payload_available event stream

It also adds the execution_payload_bid event stream as a stub, without a
trigger.
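
For illustration, a client consuming the `execution_payload_available` stream could decode each event into the `PayloadEvent` struct added later in this diff; the JSON sample here is made up, not captured from a real node:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// PayloadEvent mirrors the event struct added in this change.
type PayloadEvent struct {
	Slot      string `json:"slot"`
	BlockRoot string `json:"block_root"`
}

func main() {
	// A made-up example of what an execution_payload_available "data:" line could carry.
	raw := []byte(`{"slot":"123","block_root":"0x0000000000000000000000000000000000000000000000000000000000000000"}`)
	var ev PayloadEvent
	if err := json.Unmarshal(raw, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.Slot, ev.BlockRoot)
}
```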

These are the minimum necessary to get Dora happy on Kurtosis. Haven't
reviewed this PR yet, so opening it as draft. Only check was in Kurtosis
against Dora.

(Screenshot 2026-02-27 at 7:18 PM: https://github.com/user-attachments/assets/e888ccf3-68fe-42c0-9f9a-f911438438d1)

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-04 01:12:58 +00:00
james-prysm
0abf17f6cd refactor duties internal into core helper functions (#16402)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

This PR is a small component of
https://github.com/OffchainLabs/prysm/pull/16377 with the goal of
refactoring duties into separate endpoints for gloas. This first step
breaks out helper functions that will be useful and important in
breaking out the apis.

Did Kurtosis testing and basic benchmarks to make sure there's no bad
regression.

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: satushh <satushh@gmail.com>
2026-03-03 21:09:23 +00:00
Bastin
8307ee1098 Make committeeIndex parameter optional for GetAttestationData beacon API post Gloas (#16436)
After Gloas, the `committeeIndex` parameter of the beacon API
`GetAttestationData` is optional.

[ethereum/beacon-APIs#578](https://github.com/ethereum/beacon-APIs/pull/578)
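
For illustration, a client could build the `GetAttestationData` query as below and simply omit the index post-Gloas; this assumes the usual `/eth/v1/validator/attestation_data` path with the `committee_index` query parameter, and the localhost base URL is only an example:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// attestationDataURL builds the query for GET /eth/v1/validator/attestation_data.
// Post-Gloas the committee index may be omitted (pass nil); pre-Gloas callers
// should keep supplying it.
func attestationDataURL(base string, slot uint64, committeeIndex *uint64) string {
	q := url.Values{}
	q.Set("slot", strconv.FormatUint(slot, 10))
	if committeeIndex != nil {
		q.Set("committee_index", strconv.FormatUint(*committeeIndex, 10))
	}
	return base + "/eth/v1/validator/attestation_data?" + q.Encode()
}

func main() {
	idx := uint64(3)
	fmt.Println(attestationDataURL("http://localhost:3500", 123, &idx)) // pre-Gloas style
	fmt.Println(attestationDataURL("http://localhost:3500", 123, nil))  // post-Gloas: index omitted
}
```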
2026-03-03 20:00:07 +00:00
Bastin
7e4a039a7f Gloas API state structs (#16276)
**What does this PR do? Why is it needed?**
Adds Gloas API structs, and conversion functions
(`BeaconStateGloasFromConsensus()`)
2026-03-03 17:56:35 +00:00
487 changed files with 16805 additions and 3741 deletions

View File

@@ -72,8 +72,6 @@ exceptions:
- CONTRIBUTION_DUE_BPS_GLOAS#gloas
- GLOAS_FORK_EPOCH#gloas
- GLOAS_FORK_VERSION#gloas
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
ssz_objects:
@@ -389,10 +387,8 @@ exceptions:
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
- get_pending_balance_to_withdraw_for_builder#gloas
- get_proposer_preferences_signature#gloas
- get_upcoming_proposal_slots#gloas
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_data_available#gloas
@@ -403,7 +399,6 @@ exceptions:
- is_valid_proposal_slot#gloas
- onboard_builders_from_pending_deposits#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- verify_data_column_sidecar_kzg_proofs#gloas

.github/workflows/sbom-export.yaml (new vendored file, 67 additions)
View File

@@ -0,0 +1,67 @@
name: SBOM Export & Centralize
on:
  push:
    branches: [ "develop" ]
  schedule:
    - cron: '50 21 * * 2'
permissions:
  contents: read
jobs:
  generate-and-upload:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Source Code
        uses: actions/checkout@v6
      - name: Check for recent changes
        id: check
        run: |
          if [ -z "$(git log --since='7 days ago' --oneline | head -1)" ]; then
            echo "No commits in the last 7 days, skipping SBOM generation."
            echo "skip=true" >> "$GITHUB_OUTPUT"
          fi
      - name: Generate CycloneDX SBOM via cdxgen
        if: steps.check.outputs.skip != 'true'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          docker run --rm \
            --user "$(id -u):$(id -g)" \
            -v /tmp:/tmp \
            -v "${{ github.workspace }}:/app:rw" \
            -e FETCH_LICENSE=true \
            -e GITHUB_TOKEN \
            ghcr.io/cdxgen/cdxgen:v12.1.1 \
            -r /app \
            -o /app/sbom.cdx.json \
            --no-install-deps \
            --spec-version 1.6
          if [ ! -s sbom.cdx.json ]; then
            echo "::error::cdxgen SBOM generation failed or returned empty."
            exit 1
          fi
          echo "SBOM generated successfully:"
          ls -lh sbom.cdx.json
      - name: Upload SBOM to Dependency Track
        if: steps.check.outputs.skip != 'true'
        env:
          DT_API_KEY: ${{ secrets.DEPENDENCY_TRACK_API_KEY }}
          DT_URL: ${{ secrets.DEPENDENCY_TRACK_URL }}
        run: |
          REPO_NAME=${GITHUB_REPOSITORY##*/}
          curl -sf -X POST "${DT_URL}/api/v1/bom" \
            -H "X-Api-Key: ${DT_API_KEY}" \
            -F "autoCreate=true" \
            -F "projectName=${REPO_NAME}" \
            -F "projectVersion=${{ github.ref_name }}" \
            -F "bom=@sbom.cdx.json"
          echo "SBOM uploaded to Dependency Track for ${REPO_NAME}@${{ github.ref_name }}"

View File

@@ -9,6 +9,7 @@ go_library(
"conversions_blob.go",
"conversions_block.go",
"conversions_block_execution.go",
"conversions_gloas.go",
"conversions_lightclient.go",
"conversions_state.go",
"endpoints_beacon.go",
@@ -57,10 +58,12 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//consensus-types/blocks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],

View File

@@ -540,6 +540,12 @@ type PayloadAttestation struct {
Signature string `json:"signature"`
}
type PayloadAttestationMessage struct {
ValidatorIndex string `json:"validator_index"`
Data *PayloadAttestationData `json:"data"`
Signature string `json:"signature"`
}
type BeaconBlockBodyGloas struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
@@ -577,3 +583,17 @@ func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
func (s *SignedBeaconBlockGloas) SigString() string {
return s.Signature
}
type ExecutionPayloadEnvelope struct {
Payload *ExecutionPayloadDeneb `json:"payload"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
BuilderIndex string `json:"builder_index"`
BeaconBlockRoot string `json:"beacon_block_root"`
Slot string `json:"slot"`
StateRoot string `json:"state_root"`
}
type SignedExecutionPayloadEnvelope struct {
Message *ExecutionPayloadEnvelope `json:"message"`
Signature string `json:"signature"`
}

View File

@@ -2966,6 +2966,14 @@ func PayloadAttestationFromConsensus(pa *eth.PayloadAttestation) *PayloadAttesta
}
}
func PayloadAttestationMessageFromConsensus(m *eth.PayloadAttestationMessage) *PayloadAttestationMessage {
return &PayloadAttestationMessage{
ValidatorIndex: fmt.Sprintf("%d", m.ValidatorIndex),
Data: PayloadAttestationDataFromConsensus(m.Data),
Signature: hexutil.Encode(m.Signature),
}
}
func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *PayloadAttestationData {
return &PayloadAttestationData{
BeaconBlockRoot: hexutil.Encode(d.BeaconBlockRoot),
@@ -3275,3 +3283,26 @@ func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, err
BlobDataAvailable: d.BlobDataAvailable,
}, nil
}
// SignedExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
payload, err := ExecutionPayloadDenebFromConsensus(e.Message.Payload)
if err != nil {
return nil, err
}
var requests *ExecutionRequests
if e.Message.ExecutionRequests != nil {
requests = ExecutionRequestsFromConsensus(e.Message.ExecutionRequests)
}
return &SignedExecutionPayloadEnvelope{
Message: &ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: requests,
BuilderIndex: fmt.Sprintf("%d", e.Message.BuilderIndex),
BeaconBlockRoot: hexutil.Encode(e.Message.BeaconBlockRoot),
Slot: fmt.Sprintf("%d", e.Message.Slot),
StateRoot: hexutil.Encode(e.Message.StateRoot),
},
Signature: hexutil.Encode(e.Signature),
}, nil
}

View File

@@ -0,0 +1,89 @@
package structs
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
)
func ROExecutionPayloadBidFromConsensus(b interfaces.ROExecutionPayloadBid) *ExecutionPayloadBid {
if b == nil {
return nil
}
pbh := b.ParentBlockHash()
pbr := b.ParentBlockRoot()
bh := b.BlockHash()
pr := b.PrevRandao()
fr := b.FeeRecipient()
commitments := b.BlobKzgCommitments()
blobKzgCommitments := make([]string, 0, len(commitments))
for _, commitment := range commitments {
blobKzgCommitments = append(blobKzgCommitments, hexutil.Encode(commitment))
}
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(pbh[:]),
ParentBlockRoot: hexutil.Encode(pbr[:]),
BlockHash: hexutil.Encode(bh[:]),
PrevRandao: hexutil.Encode(pr[:]),
FeeRecipient: hexutil.Encode(fr[:]),
GasLimit: fmt.Sprintf("%d", b.GasLimit()),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex()),
Slot: fmt.Sprintf("%d", b.Slot()),
Value: fmt.Sprintf("%d", b.Value()),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment()),
BlobKzgCommitments: blobKzgCommitments,
}
}
func BuildersFromConsensus(builders []*ethpb.Builder) []*Builder {
newBuilders := make([]*Builder, len(builders))
for i, b := range builders {
newBuilders[i] = BuilderFromConsensus(b)
}
return newBuilders
}
func BuilderFromConsensus(b *ethpb.Builder) *Builder {
return &Builder{
Pubkey: hexutil.Encode(b.Pubkey),
Version: hexutil.Encode(b.Version),
ExecutionAddress: hexutil.Encode(b.ExecutionAddress),
Balance: fmt.Sprintf("%d", b.Balance),
DepositEpoch: fmt.Sprintf("%d", b.DepositEpoch),
WithdrawableEpoch: fmt.Sprintf("%d", b.WithdrawableEpoch),
}
}
func BuilderPendingPaymentsFromConsensus(payments []*ethpb.BuilderPendingPayment) []*BuilderPendingPayment {
newPayments := make([]*BuilderPendingPayment, len(payments))
for i, p := range payments {
newPayments[i] = BuilderPendingPaymentFromConsensus(p)
}
return newPayments
}
func BuilderPendingPaymentFromConsensus(p *ethpb.BuilderPendingPayment) *BuilderPendingPayment {
return &BuilderPendingPayment{
Weight: fmt.Sprintf("%d", p.Weight),
Withdrawal: BuilderPendingWithdrawalFromConsensus(p.Withdrawal),
}
}
func BuilderPendingWithdrawalsFromConsensus(withdrawals []*ethpb.BuilderPendingWithdrawal) []*BuilderPendingWithdrawal {
newWithdrawals := make([]*BuilderPendingWithdrawal, len(withdrawals))
for i, w := range withdrawals {
newWithdrawals[i] = BuilderPendingWithdrawalFromConsensus(w)
}
return newWithdrawals
}
func BuilderPendingWithdrawalFromConsensus(w *ethpb.BuilderPendingWithdrawal) *BuilderPendingWithdrawal {
return &BuilderPendingWithdrawal{
FeeRecipient: hexutil.Encode(w.FeeRecipient),
Amount: fmt.Sprintf("%d", w.Amount),
BuilderIndex: fmt.Sprintf("%d", w.BuilderIndex),
}
}

View File

@@ -972,3 +972,223 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
ProposerLookahead: lookahead,
}, nil
}
// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------
func BeaconStateGloasFromConsensus(st beaconState.BeaconState) (*BeaconStateGloas, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
srcLookahead, err := st.ProposerLookahead()
if err != nil {
return nil, err
}
lookahead := make([]string, len(srcLookahead))
for i, v := range srcLookahead {
lookahead[i] = fmt.Sprintf("%d", uint64(v))
}
// Gloas-specific fields
lepb, err := st.LatestExecutionPayloadBid()
if err != nil {
return nil, err
}
builders, err := st.Builders()
if err != nil {
return nil, err
}
nwbi, err := st.NextWithdrawalBuilderIndex()
if err != nil {
return nil, err
}
epa, err := st.ExecutionPayloadAvailabilityVector()
if err != nil {
return nil, err
}
bpp, err := st.BuilderPendingPayments()
if err != nil {
return nil, err
}
bpw, err := st.BuilderPendingWithdrawals()
if err != nil {
return nil, err
}
lbh, err := st.LatestBlockHash()
if err != nil {
return nil, err
}
pew, err := st.PayloadExpectedWithdrawals()
if err != nil {
return nil, err
}
return &BeaconStateGloas{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
ProposerLookahead: lookahead,
LatestExecutionPayloadBid: ROExecutionPayloadBidFromConsensus(lepb),
Builders: BuildersFromConsensus(builders),
NextWithdrawalBuilderIndex: fmt.Sprintf("%d", nwbi),
ExecutionPayloadAvailability: hexutil.Encode(epa),
BuilderPendingPayments: BuilderPendingPaymentsFromConsensus(bpp),
BuilderPendingWithdrawals: BuilderPendingWithdrawalsFromConsensus(bpw),
LatestBlockHash: hexutil.Encode(lbh[:]),
PayloadExpectedWithdrawals: WithdrawalsFromConsensus(pew),
}, nil
}

View File

@@ -1,11 +1,15 @@
package structs
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/ethereum/go-ethereum/common/hexutil"
)
@@ -355,3 +359,214 @@ func TestIndexedAttestation_ToConsensus(t *testing.T) {
_, err := a.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestROExecutionPayloadBidFromConsensus(t *testing.T) {
t.Run("empty blobkzg commitments", func(t *testing.T) {
bid := &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
GasLimit: 100,
BuilderIndex: 7,
Slot: 9,
Value: 11,
ExecutionPayment: 22,
BlobKzgCommitments: [][]byte{},
}
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
require.NoError(t, err)
got := ROExecutionPayloadBidFromConsensus(roBid)
want := &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
BlockHash: hexutil.Encode(bid.BlockHash),
PrevRandao: hexutil.Encode(bid.PrevRandao),
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
GasLimit: "100",
BuilderIndex: "7",
Slot: "9",
Value: "11",
ExecutionPayment: "22",
BlobKzgCommitments: []string{},
}
assert.DeepEqual(t, want, got)
})
t.Run("default", func(t *testing.T) {
bid := &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
GasLimit: 100,
BuilderIndex: 7,
Slot: 9,
Value: 11,
ExecutionPayment: 22,
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x06}, 48)},
}
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
require.NoError(t, err)
var bkcs []string
for _, commitment := range roBid.BlobKzgCommitments() {
bkcs = append(bkcs, hexutil.Encode(commitment))
}
got := ROExecutionPayloadBidFromConsensus(roBid)
want := &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
BlockHash: hexutil.Encode(bid.BlockHash),
PrevRandao: hexutil.Encode(bid.PrevRandao),
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
GasLimit: "100",
BuilderIndex: "7",
Slot: "9",
Value: "11",
ExecutionPayment: "22",
BlobKzgCommitments: bkcs,
}
assert.DeepEqual(t, want, got)
})
}
func TestBuilderConversionsFromConsensus(t *testing.T) {
builder := &eth.Builder{
Pubkey: bytes.Repeat([]byte{0xAA}, 48),
Version: bytes.Repeat([]byte{0x01}, 4),
ExecutionAddress: bytes.Repeat([]byte{0xBB}, 20),
Balance: 42,
DepositEpoch: 3,
WithdrawableEpoch: 4,
}
wantBuilder := &Builder{
Pubkey: hexutil.Encode(builder.Pubkey),
Version: hexutil.Encode(builder.Version),
ExecutionAddress: hexutil.Encode(builder.ExecutionAddress),
Balance: "42",
DepositEpoch: "3",
WithdrawableEpoch: "4",
}
assert.DeepEqual(t, wantBuilder, BuilderFromConsensus(builder))
assert.DeepEqual(t, []*Builder{wantBuilder}, BuildersFromConsensus([]*eth.Builder{builder}))
}
func TestBuilderPendingPaymentConversionsFromConsensus(t *testing.T) {
withdrawal := &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x10}, 20),
Amount: 15,
BuilderIndex: 2,
}
payment := &eth.BuilderPendingPayment{
Weight: 5,
Withdrawal: withdrawal,
}
wantWithdrawal := &BuilderPendingWithdrawal{
FeeRecipient: hexutil.Encode(withdrawal.FeeRecipient),
Amount: "15",
BuilderIndex: "2",
}
wantPayment := &BuilderPendingPayment{
Weight: "5",
Withdrawal: wantWithdrawal,
}
assert.DeepEqual(t, wantPayment, BuilderPendingPaymentFromConsensus(payment))
assert.DeepEqual(t, []*BuilderPendingPayment{wantPayment}, BuilderPendingPaymentsFromConsensus([]*eth.BuilderPendingPayment{payment}))
assert.DeepEqual(t, wantWithdrawal, BuilderPendingWithdrawalFromConsensus(withdrawal))
assert.DeepEqual(t, []*BuilderPendingWithdrawal{wantWithdrawal}, BuilderPendingWithdrawalsFromConsensus([]*eth.BuilderPendingWithdrawal{withdrawal}))
}
func TestBeaconStateGloasFromConsensus(t *testing.T) {
st, err := util.NewBeaconStateGloas(func(state *eth.BeaconStateGloas) error {
state.GenesisTime = 123
state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
state.Slot = 5
state.ProposerLookahead = []uint64{1, 2}
state.LatestExecutionPayloadBid = &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x12}, 32),
BlockHash: bytes.Repeat([]byte{0x13}, 32),
PrevRandao: bytes.Repeat([]byte{0x14}, 32),
FeeRecipient: bytes.Repeat([]byte{0x15}, 20),
GasLimit: 64,
BuilderIndex: 3,
Slot: 5,
Value: 99,
ExecutionPayment: 7,
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x16}, 48)},
}
state.Builders = []*eth.Builder{
{
Pubkey: bytes.Repeat([]byte{0x20}, 48),
Version: bytes.Repeat([]byte{0x21}, 4),
ExecutionAddress: bytes.Repeat([]byte{0x22}, 20),
Balance: 88,
DepositEpoch: 1,
WithdrawableEpoch: 2,
},
}
state.NextWithdrawalBuilderIndex = 9
state.ExecutionPayloadAvailability = []byte{0x01, 0x02}
state.BuilderPendingPayments = []*eth.BuilderPendingPayment{
{
Weight: 3,
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x23}, 20),
Amount: 4,
BuilderIndex: 5,
},
},
}
state.BuilderPendingWithdrawals = []*eth.BuilderPendingWithdrawal{
{
FeeRecipient: bytes.Repeat([]byte{0x24}, 20),
Amount: 6,
BuilderIndex: 7,
},
}
state.LatestBlockHash = bytes.Repeat([]byte{0x25}, 32)
state.PayloadExpectedWithdrawals = []*enginev1.Withdrawal{
{Index: 1, ValidatorIndex: 2, Address: bytes.Repeat([]byte{0x26}, 20), Amount: 10},
}
return nil
})
require.NoError(t, err)
got, err := BeaconStateGloasFromConsensus(st)
require.NoError(t, err)
require.Equal(t, "123", got.GenesisTime)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x10}, 32)), got.GenesisValidatorsRoot)
require.Equal(t, "5", got.Slot)
require.DeepEqual(t, []string{"1", "2"}, got.ProposerLookahead)
require.Equal(t, "9", got.NextWithdrawalBuilderIndex)
require.Equal(t, hexutil.Encode([]byte{0x01, 0x02}), got.ExecutionPayloadAvailability)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x25}, 32)), got.LatestBlockHash)
require.NotNil(t, got.LatestExecutionPayloadBid)
require.Equal(t, "64", got.LatestExecutionPayloadBid.GasLimit)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x11}, 32)), got.LatestExecutionPayloadBid.ParentBlockHash)
require.NotNil(t, got.Builders)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x20}, 48)), got.Builders[0].Pubkey)
require.Equal(t, "88", got.Builders[0].Balance)
require.Equal(t, "3", got.BuilderPendingPayments[0].Weight)
require.Equal(t, "4", got.BuilderPendingPayments[0].Withdrawal.Amount)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x23}, 20)), got.BuilderPendingPayments[0].Withdrawal.FeeRecipient)
require.Equal(t, "6", got.BuilderPendingWithdrawals[0].Amount)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x24}, 20)), got.BuilderPendingWithdrawals[0].FeeRecipient)
require.Equal(t, "1", got.PayloadExpectedWithdrawals[0].WithdrawalIndex)
require.Equal(t, "2", got.PayloadExpectedWithdrawals[0].ValidatorIndex)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x26}, 20)), got.PayloadExpectedWithdrawals[0].ExecutionAddress)
require.Equal(t, "10", got.PayloadExpectedWithdrawals[0].Amount)
}

View File

@@ -285,6 +285,13 @@ type GetBlobsResponse struct {
Data []string `json:"data"` //blobs
}
type GetExecutionPayloadEnvelopeResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data *SignedExecutionPayloadEnvelope `json:"data"`
}
type SSZQueryRequest struct {
Query string `json:"query"`
IncludeProof bool `json:"include_proof,omitempty"`

View File

@@ -112,3 +112,8 @@ type LightClientOptimisticUpdateEvent struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}
type PayloadEvent struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root"`
}

View File

@@ -262,3 +262,23 @@ type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}
type Builder struct {
Pubkey string `json:"pubkey"`
Version string `json:"version"`
ExecutionAddress string `json:"execution_address"`
Balance string `json:"balance"`
DepositEpoch string `json:"deposit_epoch"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type BuilderPendingPayment struct {
Weight string `json:"weight"`
Withdrawal *BuilderPendingWithdrawal `json:"withdrawal"`
}
type BuilderPendingWithdrawal struct {
FeeRecipient string `json:"fee_recipient"`
Amount string `json:"amount"`
BuilderIndex string `json:"builder_index"`
}

View File

@@ -221,3 +221,51 @@ type BeaconStateFulu struct {
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
ProposerLookahead []string `json:"proposer_lookahead"`
}
type BeaconStateGloas struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
ProposerLookahead []string `json:"proposer_lookahead"`
LatestExecutionPayloadBid *ExecutionPayloadBid `json:"latest_execution_payload_bid"`
Builders []*Builder `json:"builders"`
NextWithdrawalBuilderIndex string `json:"next_withdrawal_builder_index"`
ExecutionPayloadAvailability string `json:"execution_payload_availability"`
BuilderPendingPayments []*BuilderPendingPayment `json:"builder_pending_payments"`
BuilderPendingWithdrawals []*BuilderPendingWithdrawal `json:"builder_pending_withdrawals"`
LatestBlockHash string `json:"latest_block_hash"`
PayloadExpectedWithdrawals []*Withdrawal `json:"payload_expected_withdrawals"`
}

View File

@@ -18,6 +18,7 @@ import (
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
@@ -44,6 +45,9 @@ type ForkchoiceFetcher interface {
SetForkChoiceGenesisTime(time.Time)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HasFullNode([32]byte) bool
PayloadContentLookup([32]byte) ([32]byte, bool)
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
InsertPayload(interfaces.ROExecutionPayloadEnvelope) error
@@ -53,6 +57,7 @@ type ForkchoiceFetcher interface {
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
DependentRoot(primitives.Epoch) ([32]byte, error)
CanonicalNodeAtSlot(primitives.Slot) ([32]byte, bool)
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -114,6 +119,7 @@ type FinalizationFetcher interface {
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
ParentPayloadReady(interfaces.ReadOnlyBeaconBlock) bool
}
// OptimisticModeFetcher retrieves information about optimistic status of the node.
@@ -403,6 +409,32 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// ParentPayloadReady returns true if the block's parent payload is available
// in forkchoice. For pre-Gloas blocks or blocks building on empty, this always
// returns true. For blocks building on full, it checks that the full node
// exists.
func (s *Service) ParentPayloadReady(blk interfaces.ReadOnlyBeaconBlock) bool {
if blk.Version() < version.Gloas {
return true
}
parentRoot := blk.ParentRoot()
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
if err != nil {
return false
}
bid, err := blk.Body().SignedExecutionPayloadBid()
if err != nil || bid == nil || bid.Message == nil {
return false
}
parentBlockHash := [32]byte(bid.Message.ParentBlockHash)
if parentBlockHash != blockHash {
return true // builds on empty, no full node needed
}
return s.cfg.ForkChoiceStore.HasFullNode(parentRoot)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

View File

@@ -42,6 +42,27 @@ func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}
// HighestReceivedBlockRoot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockRoot() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
}
// HasFullNode returns the corresponding value from forkchoice
func (s *Service) HasFullNode(root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasFullNode(root)
}
// PayloadContentLookup returns the preferred payload-content lookup key from forkchoice.
func (s *Service) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.PayloadContentLookup(root)
}
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
s.cfg.ForkChoiceStore.RLock()

View File

@@ -620,6 +620,121 @@ func TestService_IsFinalized(t *testing.T) {
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}
func TestParentPayloadReady(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 0
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t)
ctx := t.Context()
fcs := tr.fcs
parentRoot := [32]byte{1}
parentBlockHash := [32]byte{10}
zeroHash := params.BeaconConfig().ZeroHash
// Insert parent node into forkchoice.
st, parentROBlock, err := prepareGloasForkchoiceState(ctx, 1, parentRoot, zeroHash, parentBlockHash, zeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, parentROBlock))
t.Run("pre-Gloas always true", func(t *testing.T) {
blk := util.HydrateSignedBeaconBlockDeneb(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{ParentRoot: parentRoot[:]},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
})
t.Run("parent not in forkchoice", func(t *testing.T) {
unknownParent := [32]byte{99}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: unknownParent[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
})
t.Run("builds on empty", func(t *testing.T) {
differentHash := [32]byte{99}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: differentHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
})
t.Run("builds on full without payload", func(t *testing.T) {
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
})
t.Run("builds on full with payload", func(t *testing.T) {
pe, err := blocks.WrappedROExecutionPayloadEnvelope(&ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: parentRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{},
})
require.NoError(t, err)
require.NoError(t, fcs.InsertPayload(pe))
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
})
}
func Test_hashForGenesisRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()

View File

@@ -321,7 +321,7 @@ func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, paren
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) payloadattribute.Attributer {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// If it is an epoch boundary then process slots to get the right
@@ -343,7 +343,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return emptyAttri

View File

@@ -717,14 +717,14 @@ func Test_GetPayloadAttribute(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
require.Equal(t, true, attr.IsEmpty())
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
@@ -732,7 +732,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}
@@ -747,7 +747,7 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
}
@@ -757,14 +757,14 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateCapella(t, 1)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
require.Equal(t, true, attr.IsEmpty())
// Cache hit, advance state, no fee recipient
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
@@ -775,7 +775,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
@@ -809,14 +809,14 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{})
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{}, []byte{})
require.Equal(t, true, attr.IsEmpty())
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
@@ -827,7 +827,7 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()

View File

@@ -18,19 +18,21 @@ import (
"github.com/sirupsen/logrus"
)
func (s *Service) isNewHead(r [32]byte) bool {
func (s *Service) isNewHead(r [32]byte, full bool) bool {
s.headLock.RLock()
defer s.headLock.RUnlock()
currentHeadRoot := s.originBlockRoot
currentFull := false
if s.head != nil {
currentHeadRoot = s.headRoot()
currentFull = s.head.full
}
return r != currentHeadRoot || r == [32]byte{}
return r != currentHeadRoot || full != currentFull || r == [32]byte{}
}
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) getStateAndBlock(ctx context.Context, r, h [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
if !s.hasBlockInInitSyncOrDB(ctx, r) {
return nil, nil, errors.New("block does not exist")
}
@@ -38,7 +40,7 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
if err != nil {
return nil, nil, err
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
headState, err := s.cfg.StateGen.StateByRoot(ctx, h)
if err != nil {
return nil, nil, err
}
@@ -70,7 +72,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
if !s.isNewHead(cfg.headRoot, false) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
@@ -81,7 +83,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
if s.isNewHead(fcuArgs.headRoot) {
if s.isNewHead(fcuArgs.headRoot, false) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}

View File

@@ -19,23 +19,42 @@ import (
func TestService_isNewHead(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.Equal(t, true, service.isNewHead([32]byte{}))
// Zero root is always a new head
require.Equal(t, true, service.isNewHead([32]byte{}, false))
require.Equal(t, true, service.isNewHead([32]byte{}, true))
// Different root is a new head
service.head = &head{root: [32]byte{1}}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{1}))
require.Equal(t, true, service.isNewHead([32]byte{2}, false))
// Same root and same full status is not a new head
require.Equal(t, false, service.isNewHead([32]byte{1}, false))
// Same root but different full status is a new head
require.Equal(t, true, service.isNewHead([32]byte{1}, true))
// Same root and both full is not a new head
service.head = &head{root: [32]byte{1}, full: true}
require.Equal(t, false, service.isNewHead([32]byte{1}, true))
// Same root, head is full but incoming is not full, is a new head
require.Equal(t, true, service.isNewHead([32]byte{1}, false))
// Nil head should use origin root
service.head = nil
service.originBlockRoot = [32]byte{3}
require.Equal(t, true, service.isNewHead([32]byte{2}))
require.Equal(t, false, service.isNewHead([32]byte{3}))
require.Equal(t, true, service.isNewHead([32]byte{2}, false))
require.Equal(t, false, service.isNewHead([32]byte{3}, false))
// Nil head with full=true is always a new head (originBlockRoot has full=false)
require.Equal(t, true, service.isNewHead([32]byte{3}, true))
}
func TestService_getHeadStateAndBlock(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{})
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{}, [32]byte{})
require.ErrorContains(t, "block does not exist", err)
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))

View File

@@ -1,13 +1,42 @@
package blockchain
import (
"context"
"math"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
func (s *Service) waitUntilEpoch(target primitives.Epoch, secondsPerSlot uint64) error {
if slots.ToEpoch(s.CurrentSlot()) >= target {
return nil
}
ticker := slots.NewSlotTicker(s.genesisTime, secondsPerSlot)
defer ticker.Done()
for {
select {
case slot := <-ticker.C():
if slots.ToEpoch(slot) >= target {
return nil
}
case <-s.ctx.Done():
return s.ctx.Err()
}
}
}
// getLookupParentRoot returns the root that serves as key to generate the parent state for the passed beacon block.
// if it is based on empty or it is pre-Gloas, it is the parent root of the block, otherwise if it is based on full it is
// the parent hash.
@@ -43,3 +72,142 @@ func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, err
}
return parentRoot, nil
}
func (s *Service) runLatePayloadTasks() {
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Failed to wait for initial sync")
return
}
cfg := params.BeaconConfig()
if cfg.GloasForkEpoch == math.MaxUint64 {
return
}
if err := s.waitUntilEpoch(cfg.GloasForkEpoch, cfg.SecondsPerSlot); err != nil {
return
}
offset := cfg.SlotComponentDuration(cfg.PayloadAttestationDueBPS)
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, offset, cfg.SecondsPerSlot)
defer ticker.Done()
for {
select {
case <-ticker.C():
s.latePayloadTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting late payload tasks routine")
return
}
}
}
func (s *Service) checkIfProposing(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
return s.trackedProposer(st, slot)
}
return cache.TrackedValidator{}, false
}
// This is a Gloas version of getPayloadAttribute that avoids all the clutter that was originally due to the proposer Index.
// It is guaranteed to be called for the current slot + 1 and the head state to have been advanced to at least the current epoch.
func (s *Service) getPayloadAttributeGloas(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, proposing := s.checkIfProposing(st, slot)
if !proposing {
return emptyAttri
}
st, err := transition.ProcessSlotsIfNeeded(ctx, st, accessRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return emptyAttri
}
// Get previous randao.
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return emptyAttri
}
// Get timestamp.
t, err := slots.StartTime(s.genesisTime, slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return emptyAttri
}
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
Withdrawals: withdrawals,
ParentBeaconBlockRoot: headRoot,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
return attr
}
// latePayloadTasks updates the NSC and epoch boundary caches when there is no payload in the current slot (and there is a block)
// The case where the block was also missing would have been dealt with by lateBlockTasks already.
// We call FCU only if we are proposing next slot, as the execution head is assumed to not have changed.
func (s *Service) latePayloadTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if currentSlot != s.HeadSlot() {
// We must've already sent a FCU and updated the caches in lateBlockTasks.
return
}
r, err := s.HeadRoot(ctx)
if err != nil {
log.WithError(err).Error("Failed to get head root")
return
}
hr := [32]byte(r)
if s.payloadBeingSynced.isSyncing(hr) {
return
}
if s.HasFullNode(hr) {
return
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
log.WithError(err).Error("Failed to get head state")
return
}
if !s.inRegularSync() {
return
}
attr := s.getPayloadAttributeGloas(ctx, st, currentSlot+1, r, r)
if attr == nil || attr.IsEmpty() {
return
}
beaconLatePayloadTaskTriggeredTotal.Inc()
// Head is the empty block.
bh, err := st.LatestBlockHash()
if err != nil {
log.WithError(err).Error("Could not get latest block hash to notify engine")
return
}
pid, err := s.notifyForkchoiceUpdateGloas(ctx, bh, attr)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return
}
if pid == nil {
log.Warn("Received nil payload ID from forkchoice update.")
return
}
var pId [8]byte
copy(pId[:], pid[:])
s.cfg.PayloadIDCache.Set(currentSlot+1, hr, pId)
}

View File

@@ -446,6 +446,37 @@ func TestPostPayloadHeadUpdate_NotHead(t *testing.T) {
require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, headRoot[:]))
}
func TestPostPayloadHeadUpdate_SetsHeadFull(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
root := bytesutil.ToBytes32([]byte("root1"))
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
signed, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s.head = &head{root: root, block: signed, state: st, slot: 1}
require.Equal(t, false, s.head.full)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: root[:],
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:], ParentHash: make([]byte, 32)},
Slot: 1,
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, root[:]))
s.headLock.RLock()
require.Equal(t, true, s.head.full)
s.headLock.RUnlock()
}
func TestGetLookupParentRoot_PreGloas(t *testing.T) {
service, _ := minimalTestService(t)
@@ -616,6 +647,71 @@ func TestGetLookupParentRoot_GloasParentPreForkEpoch(t *testing.T) {
require.Equal(t, parentRoot, got)
}
func TestLatePayloadTasks_ReturnsEarlyWhenBlockLate(t *testing.T) {
logHook := logTest.NewGlobal()
service, tr := setupGloasService(t, &mockExecution.EngineClient{})
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
base.LatestBlockHash = blockHash[:]
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
headRoot := bytesutil.ToBytes32([]byte("headroot"))
service.head = &head{
root: headRoot,
state: st,
slot: 1,
}
// Set genesis time so CurrentSlot > HeadSlot.
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
service.latePayloadTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
// No payload ID should have been cached.
_, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
require.Equal(t, false, has)
}
func TestLatePayloadTasks_SendsFCU(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
base.LatestBlockHash = blockHash[:]
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
signed, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
headRoot := bytesutil.ToBytes32([]byte("headroot"))
service.head = &head{
root: headRoot,
block: signed,
state: st,
slot: 1,
}
// CurrentSlot == HeadSlot == 1: place genesis 1.5 slots ago so we're solidly in slot 1.
service.SetGenesisTime(time.Now().Add(-3 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second / 2))
service.SetForkChoiceGenesisTime(service.genesisTime)
service.latePayloadTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
require.LogsDoNotContain(t, logHook, "Could not get")
// Payload ID should have been cached.
cachedPid, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
require.Equal(t, true, has)
require.Equal(t, primitives.PayloadID(pid[:]), cachedPid)
}
func TestLateBlockTasks_GloasFCU(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
@@ -646,3 +742,199 @@ func TestLateBlockTasks_GloasFCU(t *testing.T) {
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
// TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead verifies that saveHead does not
// treat the head as "full" when the latest execution payload bid was issued in a pre-fork epoch.
// This guards against the Fulu->Gloas upgrade-seeded bid (bid.BlockHash == latestBlockHash,
// bid.Slot == 0) causing a spurious full=true head before any real Gloas bid has been processed.
func TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 1
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
// Create a Gloas state where IsParentBlockFull()==true (bid.BlockHash == LatestBlockHash)
// but bid.Slot is 0 (epoch 0, pre-fork). This mimics the upgrade-seeded state.
base, blk := testGloasState(t, 1, parentRoot, blockHash)
base.LatestBlockHash = blockHash[:]
// bid.Slot defaults to 0, which is before GloasForkEpoch=1.
// Set a valid initial head so saveHead's headBlock() call does not panic.
// We do NOT insert the old block into forkchoice because insertGloasBlock
// would claim the tree root slot; the target block (parentRoot=ZeroHash) must
// be the first node inserted so it can become the tree root.
oldBlk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{})
oldSigned, err2 := blocks.NewSignedBeaconBlock(oldBlk)
require.NoError(t, err2)
oldSt, err2 := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
LatestBlockHeader: &ethpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
Eth1Data: &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment { pp := make([]*ethpb.BuilderPendingPayment, 64); for i := range pp { pp[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}} }; return pp }(),
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]uint64, 64),
})
require.NoError(t, err2)
oldRoot := bytesutil.ToBytes32([]byte("oldroot1"))
service.head = &head{root: oldRoot, block: oldSigned, state: oldSt, slot: 0}
insertGloasBlock(t, service, base, blk, blockRoot)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
// Verify precondition: IsParentBlockFull() is true.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")
// Verify guard precondition: bid.Slot is pre-fork.
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
isPrefork := slots.ToEpoch(bid.Slot()) < params.BeaconConfig().GloasForkEpoch
require.Equal(t, true, isPrefork, "precondition: bid.Slot must be pre-fork")
ssigned, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// saveHead should NOT mark the head as full because bid.Slot < GloasForkEpoch.
require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))
service.headLock.RLock()
headFull := service.head.full
service.headLock.RUnlock()
require.Equal(t, false, headFull, "head must not be full for upgrade-seeded bid")
}
// TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead verifies that saveHead correctly
// marks the head as full when the latest bid is from a post-fork epoch.
func TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 1
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
forkSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
require.NoError(t, err)
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
// Set a valid initial head so saveHead's headBlock() call does not panic.
// Do NOT use insertGloasBlock for the old block — the target block must be
// the first node inserted so it can claim the tree root (parentRoot=ZeroHash).
oldBlk2 := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{})
oldSigned2, err2 := blocks.NewSignedBeaconBlock(oldBlk2)
require.NoError(t, err2)
oldSt2, err2 := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
Slot: 0,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
LatestBlockHeader: &ethpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
Eth1Data: &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment { pp := make([]*ethpb.BuilderPendingPayment, 64); for i := range pp { pp[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}} }; return pp }(),
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]uint64, 64),
})
require.NoError(t, err2)
oldRoot2 := bytesutil.ToBytes32([]byte("oldroot2"))
service.head = &head{root: oldRoot2, block: oldSigned2, state: oldSt2, slot: 0}
base, blk := testGloasState(t, forkSlot+1, parentRoot, blockHash)
base.LatestBlockHash = blockHash[:]
// Set bid.Slot to a post-fork epoch slot.
base.LatestExecutionPayloadBid.Slot = forkSlot + 1
insertGloasBlock(t, service, base, blk, blockRoot)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
// Verify preconditions.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
isPostfork := slots.ToEpoch(bid.Slot()) >= params.BeaconConfig().GloasForkEpoch
require.Equal(t, true, isPostfork, "precondition: bid.Slot must be post-fork")
ssigned, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// saveHead SHOULD mark the head as full because bid.Slot >= GloasForkEpoch.
require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))
service.headLock.RLock()
headFull := service.head.full
service.headLock.RUnlock()
require.Equal(t, true, headFull, "head must be full for real post-fork bid")
}
// TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot verifies that lateBlockTasks
// uses headRoot (not LatestBlockHash) as the accessRoot when the bid is from a pre-fork epoch.
// Without this guard, the upgrade-seeded bid would cause lateBlockTasks to use the wrong
// access root for the next-slot cache.
func TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 1
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
// Make IsParentBlockFull() true: bid.BlockHash == LatestBlockHash.
base.LatestBlockHash = blockHash[:]
// bid.Slot is 0 (pre-fork epoch): the epoch guard should prevent using LatestBlockHash as accessRoot.
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
headRoot := bytesutil.ToBytes32([]byte("headroot"))
service.head = &head{
root: headRoot,
state: st,
slot: 1,
}
// Trigger late block logic: CurrentSlot > HeadSlot.
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
service.SetForkChoiceGenesisTime(service.genesisTime)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}

View File

@@ -50,6 +50,7 @@ type head struct {
block interfaces.ReadOnlySignedBeaconBlock // current head block.
state state.BeaconState // current head state.
slot primitives.Slot // the head block slot number
full bool // whether the head is post-CL or post-EL after Gloas
optimistic bool // optimistic status when saved head
}
@@ -60,8 +61,24 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
defer span.End()
// Pre-Gloas the head is always treated as empty because states are still keyed by block root
var full bool
var err error
if headState.Version() >= version.Gloas {
gloasFirstSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
if err != nil {
return errors.Wrap(err, "could not compute gloas first slot")
}
if headState.Slot() > gloasFirstSlot {
full, err = headState.IsParentBlockFull()
if err != nil {
return errors.Wrap(err, "could not determine if head is full or not")
}
}
}
// Do nothing if head hasn't changed.
if !s.isNewHead(newHeadRoot) {
if !s.isNewHead(newHeadRoot, full) {
return nil
}
@@ -157,6 +174,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
state: headState,
optimistic: isOptimistic,
slot: headBlock.Block().Slot(),
full: full,
}
if err := s.setHead(newHead); err != nil {
return errors.Wrap(err, "could not set head")
@@ -217,6 +235,7 @@ func (s *Service) setHead(newHead *head) error {
root: newHead.root,
block: bCp,
state: newHead.state.Copy(),
full: newHead.full,
optimistic: newHead.optimistic,
slot: newHead.slot,
}
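
For orientation, the full-head determination added above can be summarized in one helper. This is a minimal sketch, not code from the PR: the wrapper function is hypothetical, while `headState.IsParentBlockFull`, `slots.EpochStart`, `version.Gloas`, and `GloasForkEpoch` are the calls the diff itself uses.

```go
package blockchain

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/time/slots"
)

// headIsFull is a hypothetical wrapper around the logic added to saveHead:
// pre-Gloas heads and upgrade-seeded states (slot at or before the first
// Gloas slot) are never treated as full; otherwise the state decides via
// IsParentBlockFull.
func headIsFull(headState state.BeaconState) (bool, error) {
	if headState.Version() < version.Gloas {
		return false, nil
	}
	gloasFirstSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
	if err != nil {
		return false, err
	}
	if headState.Slot() <= gloasFirstSlot {
		return false, nil
	}
	return headState.IsParentBlockFull()
}
```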

View File

@@ -234,6 +234,25 @@ var (
Help: "The maximum number of blobs allowed in a block.",
},
)
beaconExecutionPayloadEnvelopeValidTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_execution_payload_envelope_valid_total",
Help: "Count the number of execution payload envelopes that were processed successfully.",
})
beaconExecutionPayloadEnvelopeInvalidTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_execution_payload_envelope_invalid_total",
Help: "Count the number of execution payload envelopes that failed processing.",
})
beaconExecutionPayloadEnvelopeProcessingDurationSeconds = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_execution_payload_envelope_processing_duration_seconds",
Help: "Captures end-to-end processing time for execution payload envelopes.",
Buckets: prometheus.DefBuckets,
},
)
beaconLatePayloadTaskTriggeredTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_late_payload_task_triggered_total",
Help: "Count the number of times late payload tasks fired.",
})
)
// reportSlotMetrics reports slot related metrics.

View File

@@ -2,6 +2,7 @@ package blockchain
import (
"context"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
@@ -57,7 +58,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// save it to the cache.
baseState, err := s.getAttPreState(ctx, tgt)
if err != nil {
return err
return fmt.Errorf("get att pre state for att root %s: %w", fmt.Sprintf("%#x", a.GetData().BeaconBlockRoot), err)
}
// Verify attestation target is from current epoch or previous epoch.

View File

@@ -1,6 +1,7 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"time"
@@ -107,7 +108,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
}
if cfg.roblock.Version() < version.Gloas {
s.sendFCU(cfg)
} else if s.isNewHead(cfg.headRoot) {
} else if s.isNewHead(cfg.headRoot, false) { // We reach this only when the incoming block is the head.
if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Could not save head")
}
@@ -388,9 +389,49 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
return nil
}
// refreshCaches updates the next slot state cache and epoch boundary caches.
// Before Fulu this is done synchronously; from Fulu onward it is deferred to a goroutine.
func (s *Service) refreshCaches(ctx context.Context, currentSlot primitives.Slot, headRoot [32]byte, headState state.BeaconState, accessRoot [32]byte) {
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if lastState.Version() < version.Fulu {
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
} else {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
}()
}
}
// updateCachesAndEpochBoundary updates the next slot state cache and handles
// epoch boundary processing. If the lastRoot matches accessRoot, the cached
// last state is reused; otherwise, the head state is advanced instead.
func (s *Service) updateCachesAndEpochBoundary(ctx context.Context, currentSlot primitives.Slot, headState state.BeaconState, accessRoot [32]byte, lastRoot []byte, lastState state.BeaconState) {
if bytes.Equal(lastRoot, accessRoot[:]) {
// Happy case: the last advanced state is the head, so reuse it.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
} else {
// The last advanced state was not the head, so use the head state instead.
headState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, accessRoot[:], headState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, accessRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock on the forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.ReadOnlyBeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
// return early if we are advancing to a past epoch
@@ -981,37 +1022,20 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
var accessRoot [32]byte
isFull, err := headState.IsParentBlockFull()
gloasFirstSlot, _ := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
if err != nil || !isFull || headState.Slot() <= gloasFirstSlot {
accessRoot = headRoot
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
accessRoot, err = headState.LatestBlockHash()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash, using head root as access root")
accessRoot = headRoot
}
}
s.refreshCaches(ctx, currentSlot, headRoot, headState, accessRoot)
// return early if we already started building a block for the current
// head root
_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
@@ -1019,7 +1043,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
return
}
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:], accessRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
return
@@ -1035,28 +1059,28 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
} else {
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}

View File

@@ -44,7 +44,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig) (*fcuConfig, error) {
if err != nil {
return nil, err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:], cfg.headRoot[:])
return fcuArgs, nil
}
@@ -89,7 +89,7 @@ func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]
// fcuArgsNonCanonicalBlock returns the arguments to the FCU call when the
// incoming block is non-canonical, that is, based on the head root.
func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig) (*fcuConfig, error) {
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot)
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot, cfg.headRoot)
if err != nil {
return nil, err
}

View File

@@ -3639,3 +3639,61 @@ func TestHandleBlockPayloadAttestations(t *testing.T) {
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
})
}
func TestUpdateCachesAndEpochBoundary_MatchingRoots(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
service.updateCachesAndEpochBoundary(t.Context(), 1, st, accessRoot, accessRoot[:], st)
cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}
func TestUpdateCachesAndEpochBoundary_DifferentRoots(t *testing.T) {
service := testServiceNoDB(t)
headState, _ := util.DeterministicGenesisState(t, 1)
lastState, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
lastRoot := [32]byte{'b'}
service.updateCachesAndEpochBoundary(t.Context(), 1, headState, accessRoot, lastRoot[:], lastState)
// Cache should be keyed by accessRoot, not lastRoot.
cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
cached = transition.NextSlotState(lastRoot[:], 1)
require.Equal(t, true, cached == nil)
}
func TestRefreshCaches_NoCachedState(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
headRoot := [32]byte{'h'}
service.refreshCaches(t.Context(), 1, headRoot, st, headRoot)
cached := transition.NextSlotState(headRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}
func TestRefreshCaches_CachedStateMatchesAccessRoot(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
headRoot := [32]byte{'h'}
// Pre-populate the cache with accessRoot.
require.NoError(t, transition.UpdateNextSlotCache(t.Context(), accessRoot[:], st))
service.refreshCaches(t.Context(), 1, headRoot, st, accessRoot)
cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}

View File

@@ -49,7 +49,13 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
// We acquire the lock here instead of in getAttPreState because that function is also called from UpdateHead, which holds a write lock
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.getAttPreState(ctx, target)
preState, err := s.getAttPreState(ctx, target)
if err != nil {
return nil, fmt.Errorf("get att pre state: %w", err)
}
return preState, nil
}
// VerifyLmdFfgConsistency verifies that the attestation's LMD and FFG votes are consistent with each other.
@@ -134,38 +140,64 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
start = time.Now()
// return early if we haven't changed head
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
newHeadRoot, newHeadBlockHash, full, err := s.cfg.ForkChoiceStore.FullHead(ctx)
if err != nil {
log.WithError(err).Error("Could not compute head from new attestations")
return
}
if !s.isNewHead(newHeadRoot) {
if !s.isNewHead(newHeadRoot, full) {
return
}
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
var accessRoot [32]byte
postGloas := slots.ToEpoch(proposingSlot) >= params.BeaconConfig().GloasForkEpoch
if full && postGloas {
accessRoot = newHeadBlockHash
} else {
accessRoot = newHeadRoot
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot, accessRoot)
if err != nil {
log.WithError(err).Error("Could not get head block")
log.WithError(err).Error("Could not get head block and state")
return
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
fcuArgs := &fcuConfig{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock,
proposingSlot: proposingSlot,
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
attr := s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:], accessRoot[:])
if attr != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
if postGloas {
go func() {
pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, newHeadBlockHash, attr)
if err != nil {
log.WithError(err).Error("Could not update forkchoice with engine")
}
if pid == nil {
if attr != nil {
log.Warn("Engine did not return a payload ID for the fork choice update with attributes")
}
return
}
var pId [8]byte
copy(pId[:], pid[:])
s.cfg.PayloadIDCache.Set(proposingSlot, newHeadRoot, pId)
}()
} else {
fcuArgs := &fcuConfig{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock,
proposingSlot: proposingSlot,
attributes: attr,
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
}
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
if err := s.saveHead(s.ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
s.pruneAttsFromPool(s.ctx, headState, headBlock)
}
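
The access-root choice above is the crux of the `UpdateHead` change: a full, post-Gloas head keys its caches by the latest execution block hash rather than the beacon block root. A minimal sketch of that decision, assuming the hypothetical helper below (only `FullHead`'s `full` flag, `slots.ToEpoch`, and `GloasForkEpoch` come from the diff):

```go
package blockchain

import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/time/slots"
)

// selectAccessRoot mirrors the access-root choice in UpdateHead: a full,
// post-Gloas head is keyed by its latest execution block hash, everything
// else falls back to the head block root.
func selectAccessRoot(proposingSlot primitives.Slot, full bool, headRoot, headBlockHash [32]byte) [32]byte {
	postGloas := slots.ToEpoch(proposingSlot) >= params.BeaconConfig().GloasForkEpoch
	if full && postGloas {
		return headBlockHash
	}
	return headRoot
}
```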
// This processes fork choice attestations from the pool to account for validator votes and fork choice.

View File

@@ -153,7 +153,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
return errors.Wrap(err, "check save hot state db")
log.WithError(err).Error("Could not check save hot state DB")
}
// We apply the same heuristic to some of our more important caches.
@@ -366,6 +366,8 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
}
}()
}
go s.checkpointStateCache.EvictUpTo(finalized.Epoch)
}
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning

View File

@@ -4,7 +4,10 @@ import (
"bytes"
"context"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
@@ -29,9 +32,18 @@ type ExecutionPayloadEnvelopeReceiver interface {
}
// ReceiveExecutionPayloadEnvelope processes a signed execution payload envelope for the Gloas fork.
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) error {
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) (err error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveExecutionPayloadEnvelope")
defer span.End()
start := time.Now()
defer func() {
beaconExecutionPayloadEnvelopeProcessingDurationSeconds.Observe(time.Since(start).Seconds())
if err != nil {
beaconExecutionPayloadEnvelopeInvalidTotal.Inc()
return
}
beaconExecutionPayloadEnvelopeValidTotal.Inc()
}()
envelope, err := signed.Envelope()
if err != nil {
@@ -102,6 +114,14 @@ func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed in
return err
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.PayloadProcessed,
Data: &statefeed.PayloadProcessedData{
Slot: envelope.Slot(),
BlockRoot: root,
},
})
execution, err := envelope.Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload from envelope for logging")
@@ -129,13 +149,21 @@ func (s *Service) postPayloadHeadUpdate(ctx context.Context, envelope interfaces
s.headLock.Lock()
s.head.state = st
s.head.full = true
s.headLock.Unlock()
if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
log.WithError(err).Error("Could not update next slot cache")
}
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
log.WithError(err).Error("Could not update next slot cache")
}
if err := s.handleEpochBoundary(ctx, envelope.Slot(), st, blockHash[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}()
attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot)
attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot, blockHash[:])
if s.inRegularSync() {
go func() {
pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, blockHash, attr)
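
The named `err` return introduced above is what allows one deferred closure to record both the processing duration and the valid/invalid counters. A standalone toy version of the pattern, with plain integers standing in for the Prometheus metrics (none of this is Prysm code):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var validTotal, invalidTotal int // stand-ins for the Prometheus counters

// processEnvelope demonstrates the pattern: because err is a named return,
// the deferred closure sees its final value and can pick the right counter.
func processEnvelope(fail bool) (err error) {
	start := time.Now()
	defer func() {
		fmt.Println("processing took", time.Since(start))
		if err != nil {
			invalidTotal++
			return
		}
		validTotal++
	}()
	if fail {
		return errors.New("envelope failed verification")
	}
	return nil
}

func main() {
	_ = processEnvelope(false)
	_ = processEnvelope(true)
	fmt.Println("valid:", validTotal, "invalid:", invalidTotal)
}
```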

View File

@@ -2,7 +2,6 @@ package blockchain
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -29,16 +28,12 @@ func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb
if err != nil {
return err
}
ptc, err := gloas.PayloadCommittee(ctx, st, a.Data.Slot)
idx, err := gloas.PayloadCommitteeIndex(ctx, st, a.Data.Slot, a.ValidatorIndex)
if err != nil {
return err
}
idx := slices.Index(ptc, a.ValidatorIndex)
if idx == -1 {
return errors.New("validator not in PTC")
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetPTCVote(root, uint64(idx), a.Data.PayloadPresent, a.Data.BlobDataAvailable)
s.cfg.ForkChoiceStore.SetPTCVote(root, idx, a.Data.PayloadPresent, a.Data.BlobDataAvailable)
return nil
}

View File

@@ -213,6 +213,7 @@ func (s *Service) Start() {
}
s.spawnProcessAttestationsRoutine()
go s.runLateBlockTasks()
go s.runLatePayloadTasks()
}
// Stop the blockchain service's main event loop and associated goroutines.
@@ -343,7 +344,7 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
return errors.Wrap(err, "could not get head state")
}
}
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false}); err != nil {
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false, false}); err != nil {
return errors.Wrap(err, "could not set head")
}
log.WithFields(logrus.Fields{
@@ -432,6 +433,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
genesisState,
genesisBlk.Block().Slot(),
false,
false,
}); err != nil {
log.WithError(err).Fatal("Could not set head")
}

View File

@@ -166,11 +166,11 @@ func (s *Service) setupForkchoiceCheckpoints() error {
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
log.WithError(err).Error("Could not update forkchoice's justified checkpoint, trying to update finalized checkpoint anyway")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: fRoot}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
log.WithError(err).Error("Could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
return nil

View File

@@ -77,6 +77,12 @@ type ChainService struct {
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
MockCanonicalRoots map[primitives.Slot][32]byte
MockCanonicalFull map[primitives.Slot]bool
MockPayloadContentLookup map[[32]byte][32]byte
MockPayloadContentIsFull map[[32]byte]bool
ParentPayloadReadyVal *bool
ForkchoiceRoots map[[32]byte]bool
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -579,7 +585,10 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
}
// InForkchoice mocks the same method in the chain service
func (s *ChainService) InForkchoice(_ [32]byte) bool {
func (s *ChainService) InForkchoice(root [32]byte) bool {
if s.ForkchoiceRoots != nil {
return s.ForkchoiceRoots[root]
}
return !s.NotFinalized
}
@@ -640,7 +649,7 @@ func prepareForkchoiceState(
}
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
st, err := state_native.InitializeFromProtoUnsafeBellatrix(base)
if err != nil {
return nil, blocks.ROBlock{}, err
}
@@ -699,7 +708,55 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HighestReceivedBlockSlot()
}
return 0
if s.Slot != nil {
return *s.Slot
}
return s.BlockSlot
}
// HighestReceivedBlockRoot mocks the same method in the chain service
func (s *ChainService) HighestReceivedBlockRoot() [32]byte {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HighestReceivedBlockRoot()
}
if s.Slot != nil && s.MockCanonicalRoots != nil {
if root, ok := s.MockCanonicalRoots[*s.Slot]; ok {
return root
}
}
if len(s.Root) == 32 {
return bytesutil.ToBytes32(s.Root)
}
return [32]byte{}
}
// HasFullNode mocks the same method in the chain service
func (s *ChainService) HasFullNode(root [32]byte) bool {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HasFullNode(root)
}
if s.Slot != nil && s.MockCanonicalRoots != nil && s.MockCanonicalFull != nil {
if r, ok := s.MockCanonicalRoots[*s.Slot]; ok && r == root {
return s.MockCanonicalFull[*s.Slot]
}
}
if s.ForkchoiceRoots != nil {
return s.ForkchoiceRoots[root]
}
return false
}
// PayloadContentLookup mocks the same method in the chain service.
func (s *ChainService) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.PayloadContentLookup(root)
}
if s.MockPayloadContentLookup != nil {
if value, ok := s.MockPayloadContentLookup[root]; ok {
return value, s.MockPayloadContentIsFull[root]
}
}
return root, false
}
// InsertNode mocks the same method in the chain service
@@ -785,6 +842,14 @@ func (c *ChainService) ReceiveExecutionPayloadEnvelope(_ context.Context, _ inte
return nil
}
// ParentPayloadReady mocks the same method in the chain service.
func (s *ChainService) ParentPayloadReady(_ interfaces.ReadOnlyBeaconBlock) bool {
if s.ParentPayloadReadyVal != nil {
return *s.ParentPayloadReadyVal
}
return true
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
@@ -795,6 +860,17 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
return c.TargetRoot, nil
}
func (c *ChainService) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
var root [32]byte
if c.MockCanonicalRoots != nil {
root = c.MockCanonicalRoots[slot]
}
if c.MockCanonicalFull != nil {
return root, c.MockCanonicalFull[slot]
}
return root, false
}
// MockSyncChecker is a mock implementation of blockchain.Checker.
// We can't make an assertion here that this is true because that would create a circular dependency.
type MockSyncChecker struct {

View File

@@ -22,6 +22,7 @@ go_library(
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
"proposer_preferences.go",
"registration.go",
"skip_slot_cache.go",
"subnet_ids.go",
@@ -55,6 +56,7 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
@@ -81,6 +83,7 @@ go_test(
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
"proposer_preferences_test.go",
"registration_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",

View File

@@ -9,10 +9,11 @@ import (
)
type AttestationConsensusData struct {
Slot primitives.Slot
HeadRoot []byte
Target forkchoicetypes.Checkpoint
Source forkchoicetypes.Checkpoint
Slot primitives.Slot
HeadRoot []byte
Target forkchoicetypes.Checkpoint
Source forkchoicetypes.Checkpoint
IsPayloadFull bool
}
// AttestationDataCache stores cached results of AttestationData requests.

View File

@@ -3,8 +3,10 @@ package cache
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -25,6 +27,14 @@ var (
Name: "check_point_state_cache_hit",
Help: "The number of check point state requests that are present in the cache.",
})
checkpointStateSize = promauto.NewGauge(prometheus.GaugeOpts{
Name: "check_point_state_cache_size",
Help: "The number of entries in the check point state cache.",
})
checkpointStateEvicted = promauto.NewCounter(prometheus.CounterOpts{
Name: "check_point_state_cache_evicted_total",
Help: "The number of entries evicted from the check point state cache.",
})
)
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
@@ -49,14 +59,14 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
item, exists := c.cache.Get(h)
if exists && item != nil {
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
return item.(state.BeaconState), nil
if !exists || item == nil {
checkpointStateMiss.Inc()
return nil, nil
}
checkpointStateMiss.Inc()
return nil, nil
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
return item.(state.BeaconState), nil
}
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
@@ -66,6 +76,35 @@ func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.
if err != nil {
return err
}
c.cache.Add(h, s)
checkpointStateSize.Set(float64(c.cache.Len()))
return nil
}
// EvictUpTo removes all entries from the cache whose state epoch is at
// or before the given epoch. Returns the number of evicted entries.
func (c *CheckpointStateCache) EvictUpTo(epoch primitives.Epoch) int {
evicted := 0
for _, key := range c.cache.Keys() {
// Peek is used here to avoid updating the recency of the entry,
// as we are only checking for eviction.
v, ok := c.cache.Peek(key)
if !ok {
continue
}
st := v.(state.ReadOnlyBeaconState)
if slots.ToEpoch(st.Slot()) <= epoch {
c.cache.Remove(key)
evicted++
}
}
if evicted > 0 {
checkpointStateSize.Set(float64(c.cache.Len()))
checkpointStateEvicted.Add(float64(evicted))
}
return evicted
}
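
For context, `EvictUpTo` is what the `go s.checkpointStateCache.EvictUpTo(finalized.Epoch)` call in `executePostFinalizationTasks` earlier in this diff relies on. A small usage sketch, assuming the standard Prysm import paths and the constructors the tests below use:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	c := cache.NewCheckpointStateCache()
	st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32}) // epoch 1 state
	if err != nil {
		panic(err)
	}
	cp := &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)}
	if err := c.AddCheckpointState(cp, st); err != nil {
		panic(err)
	}
	// On finalization of epoch 1, everything at or before it is dropped.
	fmt.Println("evicted:", c.EvictUpTo(1))
}
```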

View File

@@ -72,3 +72,76 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
}
func TestCheckpointStateCache_EvictFinalized_FinalizedEntry(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp, st))
evicted := c.EvictUpTo(1)
assert.Equal(t, 1, evicted, "expected finalized entry to be evicted")
s, err := c.StateByCheckpoint(cp)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cache to be empty after eviction")
}
func TestCheckpointStateCache_EvictFinalized_NotFinalizedEntry(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp, st))
evicted := c.EvictUpTo(3)
assert.Equal(t, 0, evicted, "expected non-finalized entry NOT to be evicted")
s, err := c.StateByCheckpoint(cp)
require.NoError(t, err)
assert.NotNil(t, s, "expected entry to still be in cache")
}
func TestCheckpointStateCache_EvictFinalized_Mixed(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st1, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
require.NoError(t, err)
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
st2, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 64})
require.NoError(t, err)
cp5 := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'C'}, 32)}
st5, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp1, st1))
require.NoError(t, c.AddCheckpointState(cp2, st2))
require.NoError(t, c.AddCheckpointState(cp5, st5))
evicted := c.EvictUpTo(3)
assert.Equal(t, 2, evicted, "expected epochs 1 and 2 to be evicted")
s, err := c.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cp1 to be evicted")
s, err = c.StateByCheckpoint(cp2)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cp2 to be evicted")
s, err = c.StateByCheckpoint(cp5)
require.NoError(t, err)
assert.NotNil(t, s, "expected cp5 to still be in cache")
}
func TestCheckpointStateCache_EvictFinalized_EmptyCache(t *testing.T) {
c := cache.NewCheckpointStateCache()
evicted := c.EvictUpTo(0)
assert.Equal(t, 0, evicted, "expected no eviction from empty cache")
}

View File

@@ -0,0 +1,87 @@
package cache
import (
"sync"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// ProposerPreference stores the proposer fee recipient and gas limit for a slot.
type ProposerPreference struct {
FeeRecipient []byte
GasLimit uint64
}
// ProposerPreferencesCache stores proposer preferences by slot.
type ProposerPreferencesCache struct {
slotToPreferences map[primitives.Slot]ProposerPreference
lock sync.RWMutex
}
// NewProposerPreferencesCache initializes a proposer preferences cache.
func NewProposerPreferencesCache() *ProposerPreferencesCache {
return &ProposerPreferencesCache{
slotToPreferences: make(map[primitives.Slot]ProposerPreference),
}
}
// Add stores proposer preferences for a slot. If the slot already exists, the
// existing value is kept and false is returned.
func (c *ProposerPreferencesCache) Add(slot primitives.Slot, feeRecipient []byte, gasLimit uint64) bool {
c.lock.Lock()
defer c.lock.Unlock()
if _, ok := c.slotToPreferences[slot]; ok {
return false
}
// FeeRecipient comes from validated SSZ-decoded proposer preferences, so
// retaining the slice reference here is intentional.
c.slotToPreferences[slot] = ProposerPreference{
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
}
return true
}
// Get returns proposer preferences for a slot.
func (c *ProposerPreferencesCache) Get(slot primitives.Slot) (ProposerPreference, bool) {
c.lock.RLock()
defer c.lock.RUnlock()
pref, ok := c.slotToPreferences[slot]
if !ok {
return ProposerPreference{}, false
}
return pref, true
}
// Has returns true if proposer preferences for the slot already exist.
func (c *ProposerPreferencesCache) Has(slot primitives.Slot) bool {
c.lock.RLock()
defer c.lock.RUnlock()
_, ok := c.slotToPreferences[slot]
return ok
}
// PruneBefore removes all proposer preferences for slots before the provided slot.
func (c *ProposerPreferencesCache) PruneBefore(slot primitives.Slot) {
c.lock.Lock()
defer c.lock.Unlock()
for cachedSlot := range c.slotToPreferences {
if cachedSlot < slot {
delete(c.slotToPreferences, cachedSlot)
}
}
}
// Clear removes all cached proposer preferences.
func (c *ProposerPreferencesCache) Clear() {
c.lock.Lock()
defer c.lock.Unlock()
c.slotToPreferences = make(map[primitives.Slot]ProposerPreference)
}

View File

@@ -0,0 +1,63 @@
package cache
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProposerPreferencesCache_AddGetHas(t *testing.T) {
c := NewProposerPreferencesCache()
slot := primitives.Slot(123)
feeRecipient := []byte{1, 2, 3, 4}
require.Equal(t, false, c.Has(slot))
added := c.Add(slot, feeRecipient, 42)
require.Equal(t, true, added)
require.Equal(t, true, c.Has(slot))
pref, ok := c.Get(slot)
require.Equal(t, true, ok)
require.DeepEqual(t, feeRecipient, pref.FeeRecipient)
require.Equal(t, uint64(42), pref.GasLimit)
}
func TestProposerPreferencesCache_AddDuplicateSlot(t *testing.T) {
c := NewProposerPreferencesCache()
slot := primitives.Slot(456)
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
require.Equal(t, false, c.Add(slot, []byte{2}, 20))
pref, ok := c.Get(slot)
require.Equal(t, true, ok)
require.DeepEqual(t, []byte{1}, pref.FeeRecipient)
require.Equal(t, uint64(10), pref.GasLimit)
}
func TestProposerPreferencesCache_Clear(t *testing.T) {
c := NewProposerPreferencesCache()
slot := primitives.Slot(789)
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
c.Clear()
require.Equal(t, false, c.Has(slot))
_, ok := c.Get(slot)
require.Equal(t, false, ok)
}
func TestProposerPreferencesCache_PruneBefore(t *testing.T) {
c := NewProposerPreferencesCache()
require.Equal(t, true, c.Add(10, []byte{1}, 10))
require.Equal(t, true, c.Add(11, []byte{2}, 11))
require.Equal(t, true, c.Add(12, []byte{3}, 12))
c.PruneBefore(11)
require.Equal(t, false, c.Has(10))
require.Equal(t, true, c.Has(11))
require.Equal(t, true, c.Has(12))
}

View File

@@ -172,7 +172,7 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
// UpdatePositionsInCommittee updates the cached validator positions in the sync committee with respect to
// the current epoch and the next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
// change, which happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconState) error {
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.ReadOnlyBeaconState) error {
// since we call UpdatePositionsInCommittee asynchronously, keep track of the cache value
// seen at the beginning of the routine and compare at the end before updating. If the underlying value has been
// cycled (new address), don't update it.

View File

@@ -32,7 +32,7 @@ func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx p
}
// UpdatePositionsInCommittee -- fake.
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconState) error {
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.ReadOnlyBeaconState) error {
return nil
}

View File

@@ -27,6 +27,7 @@ go_library(
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/math"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -41,26 +42,26 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
if beaconState.NumValidators() != len(inactivityScores) {
return nil, nil, errors.New("num of validators is different than num of inactivity scores")
}
if err := beaconState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := beaconState.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
// Set validator's balance, inactivity score and slashed/withdrawable status.
v := &precompute.Validator{
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
CurrentEpochEffectiveBalance: val.EffectiveBalance,
InactivityScore: inactivityScores[idx],
IsSlashed: val.Slashed(),
IsWithdrawableCurrentEpoch: currentEpoch >= val.WithdrawableEpoch(),
IsSlashed: val.Slashed,
IsWithdrawableCurrentEpoch: currentEpoch >= val.WithdrawableEpoch,
}
// Set validator's active status for current epoch.
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
if helpers.IsActiveCompactValidator(val, currentEpoch) {
v.IsActiveCurrentEpoch = true
bal.ActiveCurrentEpoch, err = math.Add64(bal.ActiveCurrentEpoch, val.EffectiveBalance())
bal.ActiveCurrentEpoch, err = math.Add64(bal.ActiveCurrentEpoch, val.EffectiveBalance)
if err != nil {
return err
}
}
// Set validator's active status for previous epoch.
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
if helpers.IsActiveCompactValidator(val, prevEpoch) {
v.IsActivePrevEpoch = true
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance)
if err != nil {
return err
}
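
The loop above is the new zero-allocation iteration path. A minimal sketch of the same pattern outside epoch processing, assuming only the `ForEachValidator`, `stateutil.CompactValidator`, `helpers.IsActiveCompactValidator`, and `math.Add64` APIs visible in this diff; the standalone function and its package are illustrative:

```go
package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/math"
)

// totalActiveEffectiveBalance sums active effective balances without boxing
// each validator behind the ReadOnlyValidator interface.
func totalActiveEffectiveBalance(st state.BeaconState, epoch primitives.Epoch) (uint64, error) {
	var total uint64
	err := st.ForEachValidator(func(_ int, val *stateutil.CompactValidator) error {
		if !helpers.IsActiveCompactValidator(val, epoch) {
			return nil
		}
		var addErr error
		total, addErr = math.Add64(total, val.EffectiveBalance)
		return addErr
	})
	return total, err
}
```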

View File

@@ -34,12 +34,12 @@ func BaseReward(s state.ReadOnlyBeaconState, index primitives.ValidatorIndex) (u
// BaseRewardWithTotalBalance calculates the base reward with the provided total balance.
func BaseRewardWithTotalBalance(s state.ReadOnlyBeaconState, index primitives.ValidatorIndex, totalBalance uint64) (uint64, error) {
val, err := s.ValidatorAtIndexReadOnly(index)
effBal, err := s.EffectiveBalanceAtIndex(index)
if err != nil {
return 0, err
}
cfg := params.BeaconConfig()
increments := val.EffectiveBalance() / cfg.EffectiveBalanceIncrement
increments := effBal / cfg.EffectiveBalanceIncrement
baseRewardPerInc, err := BaseRewardPerIncrement(totalBalance)
if err != nil {
return 0, err

View File

@@ -60,6 +60,7 @@ go_test(
"block_operations_fuzz_test.go",
"block_regression_test.go",
"eth1_data_test.go",
"exit_builder_test.go",
"exit_test.go",
"exports_test.go",
"genesis_test.go",

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
@@ -62,6 +63,16 @@ func ProcessVoluntaryExits(
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
}
// [New in Gloas:EIP7732] Builder exits are identified by the builder index flag.
if beaconState.Version() >= version.Gloas && exit.Exit.ValidatorIndex.IsBuilderIndex() {
if err := verifyBuilderExitAndSignature(beaconState, exit); err != nil {
return nil, errors.Wrapf(err, "could not verify builder exit %d", idx)
}
if err := gloas.InitiateBuilderExit(beaconState, exit.Exit.ValidatorIndex.ToBuilderIndex()); err != nil {
return nil, err
}
continue
}
val, err := beaconState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
return nil, err
@@ -102,19 +113,24 @@ func ProcessVoluntaryExits(
// initiate_validator_exit(state, voluntary_exit.validator_index)
func VerifyExitAndSignature(
validator state.ReadOnlyValidator,
state state.ReadOnlyBeaconState,
st state.ReadOnlyBeaconState,
signed *ethpb.SignedVoluntaryExit,
) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
fork := state.Fork()
genesisRoot := state.GenesisValidatorsRoot()
// [New in Gloas:EIP7732] Builder exits are verified separately.
if st.Version() >= version.Gloas && signed.Exit.ValidatorIndex.IsBuilderIndex() {
return verifyBuilderExitAndSignature(st, signed)
}
fork := st.Fork()
genesisRoot := st.GenesisValidatorsRoot()
// EIP-7044: Beginning in Deneb, fix the fork version to Capella.
// This allows for signed validator exits to be valid forever.
if state.Version() >= version.Deneb {
if st.Version() >= version.Deneb {
fork = &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
@@ -123,7 +139,7 @@ func VerifyExitAndSignature(
}
exit := signed.Exit
if err := verifyExitConditions(state, validator, exit); err != nil {
if err := verifyExitConditions(st, validator, exit); err != nil {
return err
}
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
@@ -198,3 +214,57 @@ func verifyExitConditions(st state.ReadOnlyBeaconState, validator state.ReadOnly
return nil
}
// verifyBuilderExitAndSignature validates a builder voluntary exit.
// [New in Gloas:EIP7732]
func verifyBuilderExitAndSignature(st state.ReadOnlyBeaconState, signed *ethpb.SignedVoluntaryExit) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
exit := signed.Exit
builderIndex := exit.ValidatorIndex.ToBuilderIndex()
// Exits must specify an epoch when they become valid; they are not valid before then.
currentEpoch := slots.ToEpoch(st.Slot())
if currentEpoch < exit.Epoch {
return fmt.Errorf("expected current epoch >= exit epoch, received %d < %d", currentEpoch, exit.Epoch)
}
// Verify the builder is active.
active, err := st.IsActiveBuilder(builderIndex)
if err != nil {
return errors.Wrap(err, "could not check if builder is active")
}
if !active {
return fmt.Errorf("builder %d is not active", builderIndex)
}
// Only exit builder if it has no pending balance to withdraw.
pendingBalance, err := st.BuilderPendingBalanceToWithdraw(builderIndex)
if err != nil {
return errors.Wrap(err, "could not get builder pending balance to withdraw")
}
if pendingBalance != 0 {
return fmt.Errorf("builder %d has pending balance to withdraw: %d", builderIndex, pendingBalance)
}
// Verify signature using builder pubkey with Capella fork version (EIP-7044).
pubkey, err := st.BuilderPubkey(builderIndex)
if err != nil {
return errors.Wrap(err, "could not get builder pubkey")
}
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
}
genesisRoot := st.GenesisValidatorsRoot()
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
if err != nil {
return err
}
if err := signing.VerifySigningRoot(exit, pubkey[:], signed.Signature, domain); err != nil {
return signing.ErrSigFailedToVerify
}
return nil
}
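
Builder exits reuse the existing `VoluntaryExit` container; the only discriminator is the builder-index flag carried by `ValidatorIndex`. A small usage sketch, assuming only the `ToValidatorIndex`, `IsBuilderIndex`, and `ToBuilderIndex` methods that appear in this diff (their bit-level encoding is not shown here):

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	builder := primitives.BuilderIndex(0)
	exit := &ethpb.VoluntaryExit{
		ValidatorIndex: builder.ToValidatorIndex(), // flagged as a builder index
		Epoch:          10,
	}
	// Block processing routes on this flag before touching the validator registry.
	if exit.ValidatorIndex.IsBuilderIndex() {
		fmt.Println("builder exit for builder", exit.ValidatorIndex.ToBuilderIndex())
	}
}
```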

View File

@@ -0,0 +1,307 @@
package blocks_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
// setGloasTestConfig sets fork epochs so Gloas is active at epoch 5.
func setGloasTestConfig(t *testing.T) {
t.Helper()
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
cfg.GloasForkEpoch = 5
params.SetActiveTestCleanup(t, cfg)
}
// newGloasStateWithBuilder creates a minimal Gloas beacon state with one active builder
// and returns the state along with the builder's BLS private key.
func newGloasStateWithBuilder(t *testing.T, builderIndex primitives.BuilderIndex, epoch primitives.Epoch) (state.BeaconState, bls.SecretKey) {
t.Helper()
priv, err := bls.RandKey()
require.NoError(t, err)
cfg := params.BeaconConfig()
builder := &ethpb.Builder{
Pubkey: priv.PublicKey().Marshal(),
WithdrawableEpoch: cfg.FarFutureEpoch,
DepositEpoch: 0,
Balance: 32_000_000_000,
ExecutionAddress: make([]byte, 20),
}
builders := make([]*ethpb.Builder, int(builderIndex)+1)
for i := range builders {
if primitives.BuilderIndex(i) == builderIndex {
builders[i] = builder
} else {
builders[i] = &ethpb.Builder{
Pubkey: make([]byte, 48),
WithdrawableEpoch: cfg.FarFutureEpoch,
DepositEpoch: 0,
ExecutionAddress: make([]byte, 20),
}
}
}
stProto := &ethpb.BeaconStateGloas{
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
Fork: &ethpb.Fork{
PreviousVersion: cfg.FuluForkVersion,
CurrentVersion: cfg.GloasForkVersion,
Epoch: cfg.GloasForkEpoch,
},
GenesisValidatorsRoot: make([]byte, 32),
FinalizedCheckpoint: &ethpb.Checkpoint{
Epoch: epoch - 1,
Root: make([]byte, 32),
},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
Builders: builders,
Validators: []*ethpb.Validator{
{
ExitEpoch: cfg.FarFutureEpoch,
ActivationEpoch: 0,
PublicKey: make([]byte, 48),
},
},
Balances: []uint64{32_000_000_000},
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
}
for i := range stProto.BlockRoots {
stProto.BlockRoots[i] = make([]byte, 32)
}
for i := range stProto.StateRoots {
stProto.StateRoots[i] = make([]byte, 32)
}
for i := range stProto.RandaoMixes {
stProto.RandaoMixes[i] = make([]byte, 32)
}
st, err := state_native.InitializeFromProtoUnsafeGloas(stProto)
require.NoError(t, err)
return st, priv
}
func signBuilderExit(t *testing.T, st state.ReadOnlyBeaconState, exit *ethpb.VoluntaryExit, priv bls.SecretKey) *ethpb.SignedVoluntaryExit {
t.Helper()
sb, err := signing.ComputeDomainAndSign(st, exit.Epoch, exit, params.BeaconConfig().DomainVoluntaryExit, priv)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
return &ethpb.SignedVoluntaryExit{
Exit: exit,
Signature: sig.Marshal(),
}
}
func TestVerifyExitAndSignature_BuilderExit_HappyPath(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
err := blocks.VerifyExitAndSignature(nil, st, signed)
require.NoError(t, err)
}
func TestVerifyExitAndSignature_BuilderNotActive(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
// Make the builder inactive by setting its withdrawable epoch (exit already initiated).
builder, err := st.Builder(builderIndex)
require.NoError(t, err)
builder.WithdrawableEpoch = 5
require.NoError(t, st.UpdateBuilderAtIndex(builderIndex, builder))
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
err = blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "is not active", err)
}
func TestVerifyExitAndSignature_BuilderPendingWithdrawal(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
// Give the builder a pending withdrawal.
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{
{
BuilderIndex: builderIndex,
Amount: 1000,
FeeRecipient: make([]byte, 20),
},
}))
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
err := blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "pending balance to withdraw", err)
}
func TestVerifyExitAndSignature_BuilderBadSignature(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, _ := newGloasStateWithBuilder(t, builderIndex, epoch)
wrongKey, err := bls.RandKey()
require.NoError(t, err)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, wrongKey)
err = blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "signature did not verify", err)
}
func TestVerifyExitAndSignature_BuilderExitInFuture(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch + 1, // Future epoch.
}
signed := signBuilderExit(t, st, exit, priv)
err := blocks.VerifyExitAndSignature(nil, st, signed)
assert.ErrorContains(t, "expected current epoch >= exit epoch", err)
}
func TestProcessVoluntaryExits_BuilderExit(t *testing.T) {
setGloasTestConfig(t)
builderIndex := primitives.BuilderIndex(0)
epoch := primitives.Epoch(10)
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
exit := &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
}
signed := signBuilderExit(t, st, exit, priv)
newState, err := blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
require.NoError(t, err)
// Verify builder's withdrawable epoch was set.
builder, err := newState.Builder(builderIndex)
require.NoError(t, err)
cfg := params.BeaconConfig()
expectedWithdrawableEpoch := epoch + cfg.MinBuilderWithdrawabilityDelay
assert.Equal(t, expectedWithdrawableEpoch, builder.WithdrawableEpoch)
}
func TestProcessVoluntaryExits_BuilderExitPreGloas(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
cfg.GloasForkEpoch = 100 // Gloas not yet active.
params.SetActiveTestCleanup(t, cfg)
epoch := primitives.Epoch(10)
builderIndex := primitives.BuilderIndex(0)
stProto := &ethpb.BeaconStateFulu{
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
Fork: &ethpb.Fork{
PreviousVersion: cfg.DenebForkVersion,
CurrentVersion: cfg.FuluForkVersion,
Epoch: cfg.FuluForkEpoch,
},
GenesisValidatorsRoot: make([]byte, 32),
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
Validators: []*ethpb.Validator{
{ExitEpoch: cfg.FarFutureEpoch, ActivationEpoch: 0, PublicKey: make([]byte, 48)},
},
Balances: []uint64{32_000_000_000},
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
}
for i := range stProto.BlockRoots {
stProto.BlockRoots[i] = make([]byte, 32)
}
for i := range stProto.StateRoots {
stProto.StateRoots[i] = make([]byte, 32)
}
for i := range stProto.RandaoMixes {
stProto.RandaoMixes[i] = make([]byte, 32)
}
st, err := state_native.InitializeFromProtoUnsafeFulu(stProto)
require.NoError(t, err)
signed := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: builderIndex.ToValidatorIndex(),
Epoch: epoch,
},
Signature: make([]byte, 96),
}
// On pre-Gloas state, builder-flagged exits are not routed to the builder path.
// ProcessVoluntaryExits treats the builder-flagged index as a regular validator index,
// which fails because no such validator exists.
_, err = blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
require.ErrorContains(t, "out of bounds", err)
}

View File

@@ -2,4 +2,5 @@ package blocks
var ProcessBLSToExecutionChange = processBLSToExecutionChange
var ErrInvalidBLSPrefix = errInvalidBLSPrefix
var ErrInvalidWithdrawalCredentials = errInvalidWithdrawalCredentials
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount

View File

@@ -147,7 +147,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
_, err = blocks.ValidateBLSToExecutionChange(st, signed)
// The state should return an empty validator, even when the validator object in the registry is
// nil. This error is returned when the withdrawal credentials are invalid or too short.
require.ErrorIs(t, err, blocks.ErrInvalidBLSPrefix)
require.ErrorIs(t, err, blocks.ErrInvalidWithdrawalCredentials)
})
t.Run("non-existent validator", func(t *testing.T) {
priv, err := bls.RandKey()

View File

@@ -28,6 +28,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -20,37 +20,46 @@ import (
)
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
st := stateWithActiveBalanceETH(t, 1000)
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
validators := st.Validators()
sk, err := bls.RandKey()
require.NoError(t, err)
for i := 0; i < len(deps); i += 1 {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
validators[i].PublicKey = sk.PublicKey().Marshal()
validators[i].WithdrawalCredentials = wc
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
}
require.NoError(t, st.SetPendingDeposits(deps))
const (
depositCount = uint64(2)
amountETH = uint64(32)
slot = 0
activeBalanceGwei = 10_000
)
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
state := stateWithActiveBalanceETH(t, 0)
secretKey, err := bls.RandKey()
require.NoError(t, err)
val := st.Validators()
seenPubkeys := make(map[string]struct{})
for i := 0; i < len(val); i += 1 {
if len(val[i].PublicKey) == 0 {
continue
}
_, ok := seenPubkeys[string(val[i].PublicKey)]
if ok {
t.Fatalf("duplicated pubkeys")
} else {
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
}
withdrawalCredentialsBytes := make([]byte, 32)
withdrawalCredentialsBytes[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
withdrawalCredentials := bytesutil.ToBytes32(withdrawalCredentialsBytes)
validators := state.Validators()
require.Equal(t, 0, len(validators))
deposits := make([]*eth.PendingDeposit, 0, depositCount)
for range depositCount {
deposit := stateTesting.GeneratePendingDeposit(t, secretKey, amountETH, withdrawalCredentials, slot)
deposits = append(deposits, deposit)
}
err = state.SetPendingDeposits(deposits)
require.NoError(t, err)
err = electra.ProcessPendingDeposits(t.Context(), state, activeBalanceGwei)
require.NoError(t, err)
// The first deposit should create a new validator,
// and the second deposit should top up the same validator.
// We should have 1 validator with balance of 64 ETH.
validators = state.Validators()
require.Equal(t, 1, len(validators))
balance, err := state.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, depositCount*amountETH, balance)
}
func TestProcessPendingDeposits(t *testing.T) {

View File

@@ -4,8 +4,8 @@ import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
@@ -30,35 +30,34 @@ import (
// ):
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
cfg := params.BeaconConfig()
effBalanceInc := cfg.EffectiveBalanceIncrement
hysteresisInc := effBalanceInc / cfg.HysteresisQuotient
downwardThreshold := hysteresisInc * cfg.HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * cfg.HysteresisUpwardMultiplier
minActivationBalance := cfg.MinActivationBalance
maxEffBalanceElectra := cfg.MaxEffectiveBalanceElectra
compoundingPrefix := cfg.CompoundingWithdrawalPrefixByte
bals := st.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
if val.IsNil() {
return nil, fmt.Errorf("validator %d is nil in state", idx)
}
return st.ApplyToEveryCompactValidator(func(idx int, val *stateutil.CompactValidator) (stateutil.CompactValidator, bool, error) {
if idx >= len(bals) {
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(st.Balances()))
return stateutil.CompactValidator{}, false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(bals))
}
balance := bals[idx]
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
if val.HasCompoundingWithdrawalCredentials() {
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
effectiveBalanceLimit := minActivationBalance
if val.WithdrawalCredentials[0] == compoundingPrefix {
effectiveBalanceLimit = maxEffBalanceElectra
}
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
updated := *val
updated.EffectiveBalance = effectiveBal
return updated, true, nil
}
return newVal, nil
}
return st.ApplyToEveryValidator(validatorFunc)
return stateutil.CompactValidator{}, false, nil
})
}
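For orientation, the contract of the new iteration primitive is: the callback receives each validator by index, returns the (possibly updated) compact value together with a changed flag, and only flagged entries are written back. Below is a minimal sketch of such a helper over a plain slice; the write-back mechanics are an assumption for illustration, and the real state-native implementation must additionally mark the validators field dirty so the hash tree root is recomputed.

package example

import "github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"

// compactRegistry is a slice-backed stand-in for the state's validator
// registry, used only to illustrate the ApplyToEveryCompactValidator contract.
type compactRegistry struct {
	vals []stateutil.CompactValidator
}

func (r *compactRegistry) applyToEveryCompactValidator(
	f func(idx int, val *stateutil.CompactValidator) (stateutil.CompactValidator, bool, error),
) error {
	for i := range r.vals {
		updated, changed, err := f(i, &r.vals[i])
		if err != nil {
			return err
		}
		if changed {
			// Write back only the entries the callback flagged as mutated.
			r.vals[i] = updated
		}
	}
	return nil
}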

View File

@@ -9,6 +9,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
@@ -48,19 +49,20 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
finalizedEpoch := st.FinalizedCheckpointEpoch()
if err := st.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
// Collect validators eligible to enter the activation queue.
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
if helpers.IsEligibleForActivationQueueCompact(val, currentEpoch) {
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))
}
// Collect validators to eject.
if val.EffectiveBalance() <= ejectionBal && helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
if val.EffectiveBalance <= ejectionBal && helpers.IsActiveCompactValidator(val, currentEpoch) {
eligibleForEjection = append(eligibleForEjection, primitives.ValidatorIndex(idx))
}
// Collect validators eligible for activation and not yet dequeued for activation.
if helpers.IsEligibleForActivationUsingROVal(st, val) {
if helpers.IsEligibleForActivationCompact(val, finalizedEpoch) {
eligibleForActivation = append(eligibleForActivation, primitives.ValidatorIndex(idx))
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
@@ -258,14 +259,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
earliestExitEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(beaconState))
preActivationIndices := make([]primitives.ValidatorIndex, 0)
compoundWithdrawalIndices := make([]primitives.ValidatorIndex, 0)
if err = beaconState.ReadFromEveryValidator(func(index int, val state.ReadOnlyValidator) error {
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch() > earliestExitEpoch {
earliestExitEpoch = val.ExitEpoch()
if err = beaconState.ForEachValidator(func(index int, val *stateutil.CompactValidator) error {
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch > earliestExitEpoch {
earliestExitEpoch = val.ExitEpoch
}
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
if val.ActivationEpoch == params.BeaconConfig().FarFutureEpoch {
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
}
if val.HasCompoundingWithdrawalCredentials() {
if val.WithdrawalCredentials[0] == params.BeaconConfig().CompoundingWithdrawalPrefixByte {
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
}
return nil

View File

@@ -16,9 +16,6 @@ import (
func TestSwitchToCompoundingValidator(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{}, // No withdrawal credentials
},
{
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
},
@@ -27,22 +24,19 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance + 100_000, // Has excess balance
},
})
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
require.NoError(t, err)
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))
// Test that a validator with withdrawal credentials can be switched to compounding.
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
v, err := s.ValidatorAtIndex(1)
require.NoError(t, electra.SwitchToCompoundingValidator(s, 0))
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
// val_1 Balance is not changed
b, err := s.BalanceAtIndex(1)
// val_0 Balance is not changed
b, err := s.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
pbd, err := s.PendingDeposits()
@@ -50,8 +44,8 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
b, err = s.BalanceAtIndex(2)
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
b, err = s.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
pbd, err = s.PendingDeposits()

View File

@@ -59,21 +59,22 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
finalizedEpoch := st.FinalizedCheckpointEpoch()
if err := st.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
// Collect validators eligible to enter the activation queue.
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
if helpers.IsEligibleForActivationQueueCompact(val, currentEpoch) {
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))
}
// Collect validators to eject.
isActive := helpers.IsActiveValidatorUsingTrie(val, currentEpoch)
belowEjectionBalance := val.EffectiveBalance() <= ejectionBal
isActive := helpers.IsActiveCompactValidator(val, currentEpoch)
belowEjectionBalance := val.EffectiveBalance <= ejectionBal
if isActive && belowEjectionBalance {
eligibleForEjection = append(eligibleForEjection, primitives.ValidatorIndex(idx))
}
// Collect validators eligible for activation and not yet dequeued for activation.
if helpers.IsEligibleForActivationUsingROVal(st, val) {
if helpers.IsEligibleForActivationCompact(val, finalizedEpoch) {
eligibleForActivation = append(eligibleForActivation, primitives.ValidatorIndex(idx))
}
@@ -243,15 +244,15 @@ func ProcessSlashings(st state.BeaconState) error {
bals := st.Balances()
changed := false
err = st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch()
if val.Slashed() && correctEpoch {
err = st.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
var penalty uint64
if st.Version() >= version.Electra {
effectiveBalanceIncrements := val.EffectiveBalance() / increment
effectiveBalanceIncrements := val.EffectiveBalance / increment
penalty = penaltyPerEffectiveBalanceIncrement * effectiveBalanceIncrements
} else {
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty = penaltyNumerator / totalBalance * increment
}
bals[idx] = helpers.DecreaseBalanceWithVal(bals[idx], penalty)
@@ -310,35 +311,31 @@ func ProcessEth1DataReset(state state.BeaconState) (state.BeaconState, error) {
// ):
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, error) {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
maxEffBalance := params.BeaconConfig().MaxEffectiveBalance
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
cfg := params.BeaconConfig()
effBalanceInc := cfg.EffectiveBalanceIncrement
maxEffBalance := cfg.MaxEffectiveBalance
hysteresisInc := effBalanceInc / cfg.HysteresisQuotient
downwardThreshold := hysteresisInc * cfg.HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * cfg.HysteresisUpwardMultiplier
bals := st.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
if val == nil {
return nil, fmt.Errorf("validator %d is nil in state", idx)
}
if err := st.ApplyToEveryCompactValidator(func(idx int, val *stateutil.CompactValidator) (stateutil.CompactValidator, bool, error) {
if idx >= len(bals) {
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(st.Balances()))
return stateutil.CompactValidator{}, false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(bals))
}
balance := bals[idx]
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
effectiveBal := min(maxEffBalance, balance-balance%effBalanceInc)
if effectiveBal != val.EffectiveBalance() {
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
if effectiveBal != val.EffectiveBalance {
updated := *val
updated.EffectiveBalance = effectiveBal
return updated, true, nil
}
}
return
}
if err := st.ApplyToEveryValidator(validatorFunc); err != nil {
return stateutil.CompactValidator{}, false, nil
}); err != nil {
return nil, err
}

View File

@@ -20,6 +20,7 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",

View File

@@ -9,6 +9,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/pkg/errors"
@@ -27,23 +28,23 @@ func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, erro
currentEpoch := time.CurrentEpoch(s)
prevEpoch := time.PrevEpoch(s)
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
// Was validator withdrawable or slashed
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch()
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch
pVal := &Validator{
IsSlashed: val.Slashed(),
IsSlashed: val.Slashed,
IsWithdrawableCurrentEpoch: withdrawable,
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
CurrentEpochEffectiveBalance: val.EffectiveBalance,
}
// Was validator active current epoch
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
if helpers.IsActiveCompactValidator(val, currentEpoch) {
pVal.IsActiveCurrentEpoch = true
pBal.ActiveCurrentEpoch += val.EffectiveBalance()
pBal.ActiveCurrentEpoch += val.EffectiveBalance
}
// Was validator active previous epoch
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
if helpers.IsActiveCompactValidator(val, prevEpoch) {
pVal.IsActivePrevEpoch = true
pBal.ActivePrevEpoch += val.EffectiveBalance()
pBal.ActivePrevEpoch += val.EffectiveBalance
}
// Set inclusion slot and inclusion distance to max; they will be compared and replaced
// with the lower values

View File

@@ -4,6 +4,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
)
@@ -25,9 +26,9 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
var hasSlashing bool
// Iterate through the validator list in state, stopping once a validator satisfies the slashing condition of the current epoch.
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
correctEpoch := epochToWithdraw == val.WithdrawableEpoch()
if val.Slashed() && correctEpoch {
err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
correctEpoch := epochToWithdraw == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
hasSlashing = true
}
return nil
@@ -42,16 +43,16 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
increment := params.BeaconConfig().EffectiveBalanceIncrement
bals := s.Balances()
validatorFunc := func(idx int, val state.ReadOnlyValidator) error {
correctEpoch := epochToWithdraw == val.WithdrawableEpoch()
if val.Slashed() && correctEpoch {
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
validatorFunc := func(idx int, val *stateutil.CompactValidator) error {
correctEpoch := epochToWithdraw == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / pBal.ActiveCurrentEpoch * increment
bals[idx] = helpers.DecreaseBalanceWithVal(bals[idx], penalty)
}
return nil
}
if err := s.ReadFromEveryValidator(validatorFunc); err != nil {
if err := s.ForEachValidator(validatorFunc); err != nil {
return err
}
return s.SetBalances(bals)
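The penalty arithmetic above is order-sensitive integer math: dividing by the increment first floors the effective balance to whole increments, and the final division floors again before scaling back up by the increment. A worked example with purely illustrative values, assuming (as in the spec formula) that minSlashing is the adjusted total slashing balance in Gwei:

package main

import "fmt"

func main() {
	const (
		increment        = uint64(1_000_000_000)      // EFFECTIVE_BALANCE_INCREMENT: 1 ETH in Gwei
		effectiveBalance = uint64(32_000_000_000)     // slashed validator: 32 ETH
		adjustedSlashing = uint64(1_000_000_000_000)  // illustrative adjusted total slashing balance: 1,000 ETH
		activeBalance    = uint64(10_000_000_000_000) // illustrative total active balance: 10,000 ETH
	)
	penaltyNumerator := effectiveBalance / increment * adjustedSlashing // 32 whole increments * 1,000 ETH in Gwei
	penalty := penaltyNumerator / activeBalance * increment             // floors 3.2 down to 3, then scales back up
	fmt.Println(penalty)                                                // 3000000000 Gwei, i.e. 3 ETH
}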

View File

@@ -46,6 +46,9 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// PayloadAttestationMessageReceived is sent after a payload attestation message is received from gossip or rpc.
PayloadAttestationMessageReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -114,3 +117,8 @@ type DataColumnReceivedData struct {
BlockRoot [32]byte
KzgCommitments [][]byte
}
// PayloadAttestationMessageReceivedData is the data sent with PayloadAttestationMessageReceived events.
type PayloadAttestationMessageReceivedData struct {
Message *ethpb.PayloadAttestationMessage
}

View File

@@ -33,6 +33,8 @@ const (
LightClientOptimisticUpdate
// PayloadAttributes events are fired upon a missed slot or new head.
PayloadAttributes
// PayloadProcessed is sent after a payload envelope has been processed.
PayloadProcessed
)
// BlockProcessedData is the data sent with BlockProcessed events.
@@ -72,3 +74,9 @@ type InitializedData struct {
// GenesisValidatorsRoot represents state.validators.HashTreeRoot().
GenesisValidatorsRoot []byte
}
// PayloadProcessedData is the data sent with PayloadProcessed events.
type PayloadProcessedData struct {
Slot primitives.Slot
BlockRoot [32]byte
}

View File

@@ -5,8 +5,10 @@ go_library(
srcs = [
"attestation.go",
"bid.go",
"builder_exit.go",
"deposit_request.go",
"log.go",
"metrics.go",
"payload.go",
"payload_attestation.go",
"pending_payment.go",
@@ -38,6 +40,8 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,37 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// InitiateBuilderExit initiates the exit of a builder by setting its withdrawable epoch.
//
// <spec fn="initiate_builder_exit" fork="gloas" hash="3da938d5">
// def initiate_builder_exit(state: BeaconState, builder_index: BuilderIndex) -> None:
// """
// Initiate the exit of the builder with index ``index``.
// """
// # Return if builder already initiated exit
// builder = state.builders[builder_index]
// if builder.withdrawable_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Set builder exit epoch
// builder.withdrawable_epoch = get_current_epoch(state) + MIN_BUILDER_WITHDRAWABILITY_DELAY
// </spec>
func InitiateBuilderExit(s state.BeaconState, builderIndex primitives.BuilderIndex) error {
builder, err := s.Builder(builderIndex)
if err != nil {
return err
}
// Return if builder already initiated exit.
if builder.WithdrawableEpoch != params.BeaconConfig().FarFutureEpoch {
return nil
}
currentEpoch := slots.ToEpoch(s.Slot())
builder.WithdrawableEpoch = currentEpoch + params.BeaconConfig().MinBuilderWithdrawabilityDelay
return s.UpdateBuilderAtIndex(builderIndex, builder)
}

View File

@@ -65,26 +65,37 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
// )
// </spec>
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
var err error
defer func() {
if err == nil {
builderDepositsProcessedTotal.Inc()
}
}()
if request == nil {
return errors.New("nil deposit request")
err = errors.New("nil deposit request")
return err
}
applied, err := applyBuilderDepositRequest(beaconState, request)
var applied bool
applied, err = applyBuilderDepositRequest(beaconState, request)
if err != nil {
return errors.Wrap(err, "could not apply builder deposit")
err = errors.Wrap(err, "could not apply builder deposit")
return err
}
if applied {
return nil
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
if err = beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: request.Pubkey,
WithdrawalCredentials: request.WithdrawalCredentials,
Amount: request.Amount,
Signature: request.Signature,
Slot: beaconState.Slot(),
}); err != nil {
return errors.Wrap(err, "could not append deposit request")
err = errors.Wrap(err, "could not append deposit request")
return err
}
return nil
}
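The refactor above routes every exit path through the named err return so the deferred closure increments the counter only on success. Detached from the beacon-state types, the pattern looks like this (a minimal sketch, not Prysm code):

package example

import "errors"

var processedTotal int // stands in for the builderDepositsProcessedTotal counter

// process mirrors the named-error + defer pattern used in processDepositRequest:
// the counter is bumped only when no error escaped the function.
func process(input *string) (err error) {
	defer func() {
		if err == nil {
			processedTotal++
		}
	}()
	if input == nil {
		err = errors.New("nil input")
		return err
	}
	return nil
}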

View File

@@ -0,0 +1,27 @@
package gloas
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
builderPendingPaymentsProcessedTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "builder_pending_payments_processed_total",
Help: "The number of builder pending payments promoted into the builder pending withdrawal queue.",
},
)
builderDepositsProcessedTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "builder_deposits_processed_total",
Help: "The number of builder-related deposit requests processed.",
},
)
builderExitsProcessedTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "builder_exits_processed_total",
Help: "The number of processed builder exits.",
},
)
)
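Because these counters are created with promauto, they are registered with the default Prometheus registry at package initialization and surface on whatever metrics endpoint the process already exposes. A self-contained sketch using the standard client_golang promhttp handler (not Prysm's own monitoring wiring):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var exitsProcessed = promauto.NewCounter(prometheus.CounterOpts{
	Name: "example_builder_exits_processed_total",
	Help: "Example counter registered with the default registry by promauto.",
})

func main() {
	exitsProcessed.Inc()
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil) // scrape http://localhost:8080/metrics
}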

View File

@@ -225,7 +225,20 @@ func ApplyExecutionPayload(
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
}
if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
if err := ApplyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), [32]byte(payload.BlockHash())); err != nil {
return err
}
return nil
}
func ApplyExecutionPayloadStateMutations(
ctx context.Context,
st state.BeaconState,
executionRequests *enginev1.ExecutionRequests,
blockHash [32]byte,
) error {
if err := processExecutionRequests(ctx, st, executionRequests); err != nil {
return errors.Wrap(err, "could not process execution requests")
}
@@ -237,7 +250,7 @@ func ApplyExecutionPayload(
return errors.Wrap(err, "could not set execution payload availability")
}
if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
if err := st.SetLatestBlockHash(blockHash); err != nil {
return errors.Wrap(err, "could not set latest block hash")
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
stderrors "errors"
"fmt"
"slices"
@@ -23,6 +24,8 @@ import (
"github.com/pkg/errors"
)
var ErrValidatorNotInPTC = stderrors.New("validator not in PTC")
// ProcessPayloadAttestations validates payload attestations in a block body.
//
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
@@ -156,6 +159,24 @@ func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
return selected, nil
}
// PayloadCommitteeIndex returns the validator's index position in the payload committee for a slot.
func PayloadCommitteeIndex(
ctx context.Context,
st state.ReadOnlyBeaconState,
slot primitives.Slot,
validatorIndex primitives.ValidatorIndex,
) (uint64, error) {
ptc, err := PayloadCommittee(ctx, st, slot)
if err != nil {
return 0, err
}
idx := slices.Index(ptc, validatorIndex)
if idx == -1 {
return 0, fmt.Errorf("%w: validator=%d slot=%d", ErrValidatorNotInPTC, validatorIndex, slot)
}
return uint64(idx), nil
}
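Because the not-a-member case is wrapped around the exported sentinel, callers can distinguish it from genuine failures with errors.Is. An illustrative caller follows; the import path for the gloas package is an assumption here.

package example

import (
	"context"
	"errors"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas" // assumed import path
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// ptcIndexOrSkip returns (index, true) when the validator is in the payload
// timeliness committee for the slot, (0, false) when it simply is not a member,
// and propagates every other error.
func ptcIndexOrSkip(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot, vi primitives.ValidatorIndex) (uint64, bool, error) {
	idx, err := gloas.PayloadCommitteeIndex(ctx, st, slot, vi)
	if errors.Is(err, gloas.ErrValidatorNotInPTC) {
		return 0, false, nil
	}
	if err != nil {
		return 0, false, err
	}
	return idx, true, nil
}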
// ptcSeed computes the seed for the payload timeliness committee.
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
@@ -254,12 +275,12 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
offset := (round % 16) * 2
randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec
val, err := st.ValidatorAtIndex(idx)
val, err := st.ValidatorAtIndexReadOnly(idx)
if err != nil {
return false, errors.Wrapf(err, "validator %d", idx)
}
return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
return val.EffectiveBalance()*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
}
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.

View File

@@ -211,7 +211,8 @@ func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
errIndex: 0,
}
err := gloas.ProcessPayloadAttestations(t.Context(), errState, body)
require.ErrorContains(t, "failed to verify indexed form", err)
require.ErrorContains(t, "failed to convert to indexed form", err)
require.ErrorContains(t, "failed to sample beacon committee 0", err)
require.ErrorContains(t, "validator 0", err)
}

View File

@@ -242,6 +242,23 @@ func TestProcessExecutionPayload_Success(t *testing.T) {
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestApplyExecutionPayloadStateMutations_UpdatesAvailabilityAndLatestHash(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
newHash := [32]byte{}
newHash[0] = 0x99
require.NoError(t, ApplyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
require.Equal(t, newHash, latestHash)
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
}
func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)

View File

@@ -52,6 +52,7 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
if err := state.RotateBuilderPendingPayments(); err != nil {
return errors.Wrap(err, "could not rotate builder pending payments")
}
builderPendingPaymentsProcessedTotal.Add(float64(len(withdrawals)))
return nil
}

View File

@@ -29,6 +29,7 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -462,8 +463,8 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
}
indices := make([]primitives.ValidatorIndex, 0, s.NumValidators())
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
if err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
if IsActiveCompactValidator(val, epoch) {
indices = append(indices, primitives.ValidatorIndex(idx))
}
return nil

View File

@@ -5,6 +5,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
mathutil "github.com/OffchainLabs/prysm/v7/math"
@@ -69,9 +70,9 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
total := uint64(0)
epoch := slots.ToEpoch(s.Slot())
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
total += val.EffectiveBalance()
if err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
if IsActiveCompactValidator(val, epoch) {
total += val.EffectiveBalance
}
return nil
}); err != nil {

View File

@@ -121,7 +121,7 @@ func IsNextPeriodSyncCommittee(
// CurrentPeriodSyncSubcommitteeIndices returns the subcommittee indices of the
// current period sync committee for input validator.
func CurrentPeriodSyncSubcommitteeIndices(
st state.BeaconState, valIdx primitives.ValidatorIndex,
st state.ReadOnlyBeaconState, valIdx primitives.ValidatorIndex,
) ([]primitives.CommitteeIndex, error) {
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {

View File

@@ -9,6 +9,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -137,8 +138,8 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
}()
var indices []primitives.ValidatorIndex
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
if err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
if IsActiveCompactValidator(val, epoch) {
indices = append(indices, primitives.ValidatorIndex(idx))
}
return nil
@@ -190,8 +191,8 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
}()
count := uint64(0)
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if IsActiveValidatorUsingTrie(val, epoch) {
if err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
if IsActiveCompactValidator(val, epoch) {
count++
}
return nil
@@ -523,6 +524,32 @@ func isEligibleForActivation(activationEligibilityEpoch, activationEpoch, finali
activationEpoch == params.BeaconConfig().FarFutureEpoch
}
// --- CompactValidator direct-access helpers (zero-alloc) ---
// IsActiveCompactValidator checks if a compact validator is active at the given epoch.
func IsActiveCompactValidator(cv *stateutil.CompactValidator, epoch primitives.Epoch) bool {
return checkValidatorActiveStatus(cv.ActivationEpoch, cv.ExitEpoch, epoch)
}
// IsEligibleForActivationQueueCompact checks if a compact validator is eligible
// to be placed into the activation queue.
func IsEligibleForActivationQueueCompact(cv *stateutil.CompactValidator, currentEpoch primitives.Epoch) bool {
if currentEpoch >= params.BeaconConfig().ElectraForkEpoch {
return isEligibleForActivationQueueElectra(cv.ActivationEligibilityEpoch, cv.EffectiveBalance)
}
return isEligibleForActivationQueue(cv.ActivationEligibilityEpoch, cv.EffectiveBalance)
}
// IsEligibleForActivationCompact checks if a compact validator is eligible for activation.
func IsEligibleForActivationCompact(cv *stateutil.CompactValidator, finalizedEpoch primitives.Epoch) bool {
return isEligibleForActivation(cv.ActivationEligibilityEpoch, cv.ActivationEpoch, finalizedEpoch)
}
// IsActiveNonSlashedCompactValidator checks if a compact validator is active and not slashed.
func IsActiveNonSlashedCompactValidator(cv *stateutil.CompactValidator, epoch primitives.Epoch) bool {
return checkValidatorActiveStatus(cv.ActivationEpoch, cv.ExitEpoch, epoch) && !cv.Slashed
}
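// The helpers above touch only a handful of CompactValidator fields. For
// orientation, a plausible shape of the struct is sketched below, with field
// names taken from their usages in this changeset; the exact layout, types,
// and any additional fields of stateutil.CompactValidator are assumptions.
//
//	type CompactValidator struct {
//		WithdrawalCredentials      []byte           // first byte distinguishes 0x01 from 0x02 (compounding) credentials
//		EffectiveBalance           uint64           // Gwei
//		Slashed                    bool
//		ActivationEligibilityEpoch primitives.Epoch
//		ActivationEpoch            primitives.Epoch
//		ExitEpoch                  primitives.Epoch
//		WithdrawableEpoch          primitives.Epoch
//	}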
// LastActivatedValidatorIndex provides the last activated validator given a state
func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconState) (primitives.ValidatorIndex, error) {
_, span := trace.StartSpan(ctx, "helpers.LastActivatedValidatorIndex")

View File

@@ -63,11 +63,33 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
// This is done to improve performance since the internal KZG library is way more
// efficient when verifying in batch.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/p2p-interface.md#modified-verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
commitmentsBySidecar := make([][][]byte, len(sidecars))
for i := range sidecars {
commitmentsBySidecar[i] = sidecars[i].KzgCommitments
}
return verifyDataColumnsSidecarKZGProofs(sidecars, commitmentsBySidecar)
}
// VerifyDataColumnsSidecarKZGProofsWithCommitments verifies KZG proofs using
// explicitly provided commitments instead of the sidecar's own. This is used
// by Gloas, which validates against bid.blob_kzg_commitments.
func VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars []blocks.RODataColumn, commitmentsBySidecar [][][]byte) error {
return verifyDataColumnsSidecarKZGProofs(sidecars, commitmentsBySidecar)
}
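An illustrative Gloas-side caller, for sidecars that all belong to a single block, would verify every sidecar's columns against the commitments carried by the committed bid rather than the sidecar's own field. How the bid commitments are obtained is assumed here; this is a sketch, not the actual call site.

// verifyColumnsAgainstBid repeats the bid's blob KZG commitments for each
// sidecar of the same block and delegates to the batch verifier.
func verifyColumnsAgainstBid(sidecars []blocks.RODataColumn, bidCommitments [][]byte) error {
	commitmentsBySidecar := make([][][]byte, len(sidecars))
	for i := range sidecars {
		commitmentsBySidecar[i] = bidCommitments
	}
	return VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars, commitmentsBySidecar)
}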
func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmentsBySidecar [][][]byte) error {
if len(sidecars) != len(commitmentsBySidecar) {
return ErrMismatchLength
}
// Compute the total count.
count := 0
for _, sidecar := range sidecars {
for i, sidecar := range sidecars {
if len(sidecar.Column) != len(commitmentsBySidecar[i]) {
return ErrMismatchLength
}
count += len(sidecar.Column)
}
@@ -76,7 +98,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
cells := make([]kzg.Cell, 0, count)
proofs := make([]kzg.Bytes48, 0, count)
for _, sidecar := range sidecars {
for sidecarIndex, sidecar := range sidecars {
for i := range sidecar.Column {
var (
commitment kzg.Bytes48
@@ -84,7 +106,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
proof kzg.Bytes48
)
commitmentBytes := sidecar.KzgCommitments[i]
commitmentBytes := commitmentsBySidecar[sidecarIndex][i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]

View File

@@ -89,6 +89,12 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.NoError(t, err)
})
t.Run("with commitments", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
err := peerdas.VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars, sidecarCommitments(sidecars))
require.NoError(t, err)
})
}
func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
@@ -348,6 +354,14 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
}
}
func sidecarCommitments(sidecars []blocks.RODataColumn) [][][]byte {
commitmentsBySidecar := make([][][]byte, len(sidecars))
for i := range sidecars {
commitmentsBySidecar[i] = sidecars[i].KzgCommitments
}
return commitmentsBySidecar
}
func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgProofs [][]byte) blocks.RODataColumn {
pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)

View File

@@ -120,7 +120,8 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
compactValidators := stateutil.CompactValidatorsFromProto(preState.Validators())
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(compactValidators)
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}
@@ -224,7 +225,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
BodyRoot: bodyRoot[:],
}
ist, err := state_native.InitializeFromProtoBellatrix(st)
ist, err := state_native.InitializeFromProtoUnsafeBellatrix(st)
if err != nil {
return nil, err
}
@@ -276,5 +277,5 @@ func EmptyGenesisStateBellatrix() (state.BeaconState, error) {
},
}
return state_native.InitializeFromProtoBellatrix(st)
return state_native.InitializeFromProtoUnsafeBellatrix(st)
}

View File

@@ -147,7 +147,8 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
compactValidators := stateutil.CompactValidatorsFromProto(preState.Validators())
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(compactValidators)
if err != nil {
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
}
@@ -217,7 +218,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
BodyRoot: bodyRoot[:],
}
return state_native.InitializeFromProtoPhase0(st)
return state_native.InitializeFromProtoUnsafePhase0(st)
}
// EmptyGenesisState returns an empty beacon state object.
@@ -259,7 +260,7 @@ func EmptyGenesisState() (state.BeaconState, error) {
Eth1DataVotes: []*ethpb.Eth1Data{},
Eth1DepositIndex: 0,
}
return state_native.InitializeFromProtoPhase0(st)
return state_native.InitializeFromProtoUnsafePhase0(st)
}
// IsValidGenesisState gets called whenever there's a deposit event,

View File

@@ -55,7 +55,7 @@ func NextSlotState(root []byte, wantedSlot types.Slot) state.BeaconState {
// UpdateNextSlotCache updates the `nextSlotCache`. It saves the input state after advancing the state slot by 1
// by calling `ProcessSlots`, it also saves the input root for later look up.
// This is useful to call after successfully processing a block.
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.BeaconState) error {
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.ReadOnlyBeaconState) error {
// Advancing one slot by using a copied state.
copied := state.Copy()
copied, err := ProcessSlots(ctx, copied, copied.Slot()+1)

View File

@@ -159,6 +159,15 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
return state, nil
}
// ProcessSlotsIfNeeded takes a ReadOnlyBeaconState and processes slots only if needed; it returns a ReadOnlyBeaconState.
func ProcessSlotsIfNeeded(ctx context.Context, state state.ReadOnlyBeaconState, accessRoot []byte, slot primitives.Slot) (state.ReadOnlyBeaconState, error) {
if slot <= state.Slot() {
return state, nil
}
copied := state.Copy()
return ProcessSlotsUsingNextSlotCache(ctx, copied, accessRoot, slot)
}
// ProcessSlotsUsingNextSlotCache processes slots by using next slot cache for higher efficiency.
func ProcessSlotsUsingNextSlotCache(
ctx context.Context,

View File

@@ -10,7 +10,9 @@ import (
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
engine "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/stretchr/testify/require"
@@ -73,12 +75,12 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
ExecutionPayloadAvailability: availability,
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
@@ -139,3 +141,217 @@ func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
BodyRoot: make([]byte, 32),
}
}
// newGloasForkBoundaryState returns a Gloas BeaconState where IsParentBlockFull()==true
// because bid.BlockHash == latestBlockHash. The parentBlockRoot parameter controls
// whether the bid looks like an upgrade-seeded bid (all-zeros) or a real committed bid (non-zero).
func newGloasForkBoundaryState(
t *testing.T,
slot primitives.Slot,
blockHash [32]byte,
parentBlockRoot [32]byte,
) state.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
protoState := &ethpb.BeaconStateGloas{
Slot: slot,
LatestBlockHeader: testBeaconBlockHeader(),
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
ExecutionPayloadAvailability: availability,
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
// bid.BlockHash == LatestBlockHash so that IsParentBlockFull() returns true.
LatestBlockHash: blockHash[:],
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
PreviousEpochParticipation: []byte{},
CurrentEpochParticipation: []byte{},
JustificationBits: []byte{0},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
PayloadExpectedWithdrawals: make([]*engine.Withdrawal, 0),
ProposerLookahead: make([]uint64, 0),
Builders: make([]*ethpb.Builder, 0),
}
for i := range protoState.BlockRoots {
protoState.BlockRoots[i] = make([]byte, 32)
}
for i := range protoState.StateRoots {
protoState.StateRoots[i] = make([]byte, 32)
}
for i := range protoState.RandaoMixes {
protoState.RandaoMixes[i] = make([]byte, 32)
}
for i := range protoState.BuilderPendingPayments {
protoState.BuilderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
}
}
pubkeys := make([][]byte, cfg.SyncCommitteeSize)
for i := range pubkeys {
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
protoState.CurrentSyncCommittee = &ethpb.SyncCommittee{Pubkeys: pubkeys, AggregatePubkey: aggPubkey}
protoState.NextSyncCommittee = &ethpb.SyncCommittee{Pubkeys: pubkeys, AggregatePubkey: aggPubkey}
st, err := state_native.InitializeFromProtoGloas(protoState)
require.NoError(t, err)
return st
}
// newGloasTestBlock returns an ROBlock at the given slot with the given parentRoot.
func newGloasTestBlock(t *testing.T, slot primitives.Slot, parentRoot [32]byte) consensusblocks.ROBlock {
t.Helper()
blkProto := &ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: slot,
ParentRoot: parentRoot[:],
StateRoot: make([]byte, 32),
Body: &ethpb.BeaconBlockBodyGloas{
RandaoReveal: make([]byte, fieldparams.BLSSignatureLength),
Graffiti: make([]byte, 32),
Eth1Data: &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncAggregateSyncCommitteeBytesLength),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
SignedExecutionPayloadBid: &ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
Slot: slot,
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{},
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
PayloadAttestations: []*ethpb.PayloadAttestation{},
},
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
}
wsb, err := consensusblocks.NewSignedBeaconBlock(blkProto)
require.NoError(t, err)
rob, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
return rob
}
// TestProcessSlotsForBlock_UpgradeSeededBid verifies that ProcessSlotsForBlock uses
// b.ParentRoot() as the NSC access key when the state has an upgrade-seeded bid
// (bid.ParentBlockRoot == zero). This guards against the Fulu->Gloas fork-boundary
// false positive where UpgradeToGloas seeds bid.BlockHash == latestBlockHash while
// leaving bid.ParentBlockRoot as all-zeros.
func TestProcessSlotsForBlock_UpgradeSeededBid(t *testing.T) {
ctx := context.Background()
parentRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := [32]byte{0xAA, 0xBB, 0xCC}
targetSlot := primitives.Slot(9)
// Build a Gloas state at slot 8 with IsParentBlockFull()==true but
// bid.ParentBlockRoot==zero (upgrade-seeded: not a real committed bid).
st := newGloasForkBoundaryState(t, targetSlot-1, blockHash, [32]byte{})
require.Equal(t, version.Gloas, st.Version())
// Verify preconditions.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.True(t, full, "precondition: IsParentBlockFull must be true")
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
require.Equal(t, [32]byte{}, bid.ParentBlockRoot(), "upgrade-seeded bid must have zero ParentBlockRoot")
// Prime NSC with parentRoot as the access key.
// With the guard in place (realBid==false), ProcessSlotsForBlock will use
// b.ParentRoot() as the NSC key and find this cached entry.
require.NoError(t, UpdateNextSlotCache(ctx, parentRoot[:], st))
blk := newGloasTestBlock(t, targetSlot, parentRoot)
out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
require.NoError(t, err)
require.Equal(t, targetSlot, out.Slot())
// Verify that the NSC entry primed under parentRoot is still present,
// confirming it was used (read) rather than bypassed.
cached := NextSlotState(parentRoot[:], targetSlot)
require.NotNil(t, cached, "NSC entry under parentRoot should still be present after use")
}
// TestProcessSlotsForBlock_RealBid verifies that ProcessSlotsForBlock uses
// LatestBlockHash as the NSC access key when the state has a real committed bid
// (bid.ParentBlockRoot != zero). This is the normal post-fork case.
func TestProcessSlotsForBlock_RealBid(t *testing.T) {
ctx := context.Background()
parentRoot := [32]byte{0x01, 0x02, 0x03}
blockHash := [32]byte{0xAA, 0xBB, 0xCC}
realParentBlockRoot := [32]byte{0xDE, 0xAD, 0xBE, 0xEF}
targetSlot := primitives.Slot(9)
// Build a Gloas state at slot 8 with IsParentBlockFull()==true and
// bid.ParentBlockRoot!=zero (a real committed bid).
st := newGloasForkBoundaryState(t, targetSlot-1, blockHash, realParentBlockRoot)
require.Equal(t, version.Gloas, st.Version())
// Verify preconditions.
full, err := st.IsParentBlockFull()
require.NoError(t, err)
require.True(t, full, "precondition: IsParentBlockFull must be true")
bid, err := st.LatestExecutionPayloadBid()
require.NoError(t, err)
require.NotEqual(t, [32]byte{}, bid.ParentBlockRoot(), "real bid must have non-zero ParentBlockRoot")
// Prime NSC with the EL block hash as access key.
// With the guard in place (realBid==true), ProcessSlotsForBlock will use
// LatestBlockHash as the NSC key and find this cached entry.
require.NoError(t, UpdateNextSlotCache(ctx, blockHash[:], st))
blk := newGloasTestBlock(t, targetSlot, parentRoot)
out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
require.NoError(t, err)
require.Equal(t, targetSlot, out.Slot())
// Verify that the NSC entry primed under blockHash is still present,
// confirming it was used (read) rather than bypassed.
cached := NextSlotState(blockHash[:], targetSlot)
require.NotNil(t, cached, "NSC entry under blockHash should still be present after use")
}
// TestProcessSlotsForBlock_PreGloas verifies that ProcessSlotsForBlock uses
// b.ParentRoot() as access key on pre-Gloas (Fulu) states, unchanged by the fix.
func TestProcessSlotsForBlock_PreGloas(t *testing.T) {
ctx := context.Background()
parentRoot := [32]byte{0x01, 0x02, 0x03}
targetSlot := primitives.Slot(5)
// newGloasState creates a Gloas-versioned state, whereas this test targets the
// pre-Gloas (Fulu) behavior. We reuse newGloasState as a base and only verify
// that slot advancement works: ProcessSlotsForBlock has an explicit version
// check at the top, and states with version < Gloas take the pre-Gloas path
// that keys the NSC by b.ParentRoot().
st := newGloasState(t, targetSlot-1, bytes.Repeat([]byte{0}, int(params.BeaconConfig().SlotsPerHistoricalRoot/8)))
blk := newGloasTestBlock(t, targetSlot, parentRoot)
out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
require.NoError(t, err)
require.Equal(t, targetSlot, out.Slot())
}
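
The fork-boundary key-selection guard these three tests exercise is not itself part of the hunks shown here. Below is a minimal sketch of the logic they imply; the helper name `nscKey`, its `latestBlockHash` parameter, the package name, and the `runtime/version` import path are assumptions, while `IsParentBlockFull`, `LatestExecutionPayloadBid`, `ParentBlockRoot`, `Version` and `version.Gloas` all appear in the tests above.

```go
package transition // package name assumed for this sketch

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/runtime/version" // import path assumed
)

// nscKey sketches how the next-slot-cache (NSC) access key is chosen.
// Illustrative only, not the actual ProcessSlotsForBlock code.
func nscKey(st state.BeaconState, parentRoot, latestBlockHash [32]byte) ([32]byte, error) {
	// Pre-Gloas states always key the NSC by the block's parent root.
	if st.Version() < version.Gloas {
		return parentRoot, nil
	}
	full, err := st.IsParentBlockFull()
	if err != nil {
		return [32]byte{}, err
	}
	bid, err := st.LatestExecutionPayloadBid()
	if err != nil {
		return [32]byte{}, err
	}
	// UpgradeToGloas seeds bid.BlockHash == latestBlockHash but leaves the
	// bid's ParentBlockRoot zero, so only a non-zero ParentBlockRoot marks
	// a real committed bid.
	realBid := bid.ParentBlockRoot() != [32]byte{}
	if full && realBid {
		// Normal post-fork case: key by the EL block hash.
		return latestBlockHash, nil
	}
	// Upgrade-seeded bid at the fork boundary: fall back to the parent root.
	return parentRoot, nil
}
```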


@@ -16,6 +16,7 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",


@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/math"
@@ -38,8 +39,8 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
currentEpoch := slots.ToEpoch(s.Slot())
totalActiveBalance := uint64(0)
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
e := val.ExitEpoch()
err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
e := val.ExitEpoch
if e != farFutureEpoch {
if e > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = e
@@ -50,8 +51,8 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
}
// Calculate total active balance in the same loop
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
totalActiveBalance += val.EffectiveBalance()
if helpers.IsActiveCompactValidator(val, currentEpoch) {
totalActiveBalance += val.EffectiveBalance
}
return nil
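
The hunk above swaps the interface-based ReadFromEveryValidator walk for ForEachValidator over CompactValidator values. A rough sketch of the shape this implies follows; the field types, the backing slice, the `beaconState` receiver and the `totalEffectiveBalance` example are assumptions, and only the names CompactValidator, ForEachValidator, ExitEpoch and EffectiveBalance come from the diff.

```go
// Sketch only: the shape implied by the ExitInformation hunk above.
type CompactValidator struct {
	EffectiveBalance uint64 // Gwei
	ExitEpoch        uint64 // type assumed; Prysm may use primitives.Epoch
}

type beaconState struct {
	validators []CompactValidator
}

// ForEachValidator visits every validator without boxing it in an interface:
// the callback receives a pointer into the backing slice, so the loop itself
// performs no per-validator heap allocation.
func (b *beaconState) ForEachValidator(f func(idx int, val *CompactValidator) error) error {
	for i := range b.validators {
		if err := f(i, &b.validators[i]); err != nil {
			return err
		}
	}
	return nil
}

// Example use mirroring the hunk: accumulate effective balances in one pass.
func totalEffectiveBalance(b *beaconState) uint64 {
	var total uint64
	_ = b.ForEachValidator(func(_ int, v *CompactValidator) error {
		total += v.EffectiveBalance
		return nil
	})
	return total
}
```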


@@ -142,6 +142,7 @@ type Reconstructor interface {
) (map[[32]byte]*pb.ExecutionPayloadDeneb, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
ReconstructExecutionPayloadEnvelope(ctx context.Context, envelope *ethpb.SignedBlindedExecutionPayloadEnvelope) (*ethpb.SignedExecutionPayloadEnvelope, error)
}
// EngineCaller defines a client that can interact with an Ethereum
@@ -652,6 +653,33 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
return unb, nil
}
// ReconstructExecutionPayloadEnvelope takes a blinded execution payload envelope and
// reconstructs the full envelope by fetching the execution payload from the EL via
// eth_getBlockByHash.
func (s *Service) ReconstructExecutionPayloadEnvelope(
ctx context.Context, envelope *ethpb.SignedBlindedExecutionPayloadEnvelope,
) (*ethpb.SignedExecutionPayloadEnvelope, error) {
if envelope == nil || envelope.Message == nil {
return nil, errors.New("nil blinded execution payload envelope")
}
blockHash := bytesutil.ToBytes32(envelope.Message.BlockHash)
payload, err := s.ReconstructFullExecutionPayloadByHash(ctx, blockHash)
if err != nil {
return nil, errors.Wrap(err, "could not reconstruct execution payload")
}
return &ethpb.SignedExecutionPayloadEnvelope{
Message: &ethpb.ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: envelope.Message.ExecutionRequests,
BuilderIndex: envelope.Message.BuilderIndex,
BeaconBlockRoot: envelope.Message.BeaconBlockRoot,
Slot: envelope.Message.Slot,
StateRoot: envelope.Message.StateRoot,
},
Signature: envelope.Signature,
}, nil
}
// ReconstructFullExecutionPayloadByHash reconstructs a full deneb payload from EL data by block hash.
func (s *Service) ReconstructFullExecutionPayloadByHash(
ctx context.Context, blockHash [32]byte,


@@ -91,7 +91,7 @@ func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
elCommit: "abcd1234",
userGraffiti: []byte("1234567890123456789"), // 19 chars, leaves 13 bytes = full format + space
wantPrefix: "1234567890123456789",
wantSuffix: " GEabcdPR",
wantSuffix: " GEabcdPR",
},
{
name: "With EL - reduced commits (24 char user, 8 bytes available) - no space, would reduce tier",
@@ -99,7 +99,7 @@ func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
elCommit: "abcd1234",
userGraffiti: []byte("123456789012345678901234"), // 24 chars, leaves exactly 8 bytes = reduced format, no room for space
wantPrefix: "123456789012345678901234",
wantSuffix: "GEabPR",
wantSuffix: "GEabPR",
},
{
name: "With EL - reduced commits (23 char user, 9 bytes available) - space fits",
@@ -107,7 +107,7 @@ func TestGraffitiInfo_GenerateGraffiti(t *testing.T) {
elCommit: "abcd1234",
userGraffiti: []byte("12345678901234567890123"), // 23 chars, leaves 9 bytes = reduced format + space
wantPrefix: "12345678901234567890123",
wantSuffix: " GEabPR",
wantSuffix: " GEabPR",
},
{
name: "With EL - codes only (28 char user, 4 bytes available) - no space, would reduce tier",


@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
pb "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/holiman/uint256"
@@ -171,6 +172,49 @@ func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.Cons
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
}
// ReconstructExecutionPayloadEnvelope --
func (e *EngineClient) ReconstructExecutionPayloadEnvelope(
_ context.Context, envelope *ethpb.SignedBlindedExecutionPayloadEnvelope,
) (*ethpb.SignedExecutionPayloadEnvelope, error) {
if e.Err != nil {
return nil, e.Err
}
payload, ok := e.ExecutionPayloadByBlockHash[bytesutil.ToBytes32(envelope.Message.BlockHash)]
if !ok {
return nil, errors.New("execution payload not found for block hash")
}
return &ethpb.SignedExecutionPayloadEnvelope{
Message: &ethpb.ExecutionPayloadEnvelope{
Payload: payloadToPayloadDeneb(payload),
ExecutionRequests: envelope.Message.ExecutionRequests,
BuilderIndex: envelope.Message.BuilderIndex,
BeaconBlockRoot: envelope.Message.BeaconBlockRoot,
Slot: envelope.Message.Slot,
StateRoot: envelope.Message.StateRoot,
},
Signature: envelope.Signature,
}, nil
}
func payloadToPayloadDeneb(p *pb.ExecutionPayload) *pb.ExecutionPayloadDeneb {
return &pb.ExecutionPayloadDeneb{
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
BlockNumber: p.BlockNumber,
GasLimit: p.GasLimit,
GasUsed: p.GasUsed,
Timestamp: p.Timestamp,
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas,
BlockHash: p.BlockHash,
Transactions: p.Transactions,
}
}
// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
ttd := new(big.Int)


@@ -161,7 +161,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10
f.ProcessAttestation(t.Context(), []uint64{0}, indexToHash(1), 0, true)
f.ProcessAttestation(t.Context(), []uint64{0}, indexToHash(1), 1, true)
// With the additional vote to the left branch, the head should be 9:
// 0 <-- start
@@ -191,7 +191,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
// 7 8
// | |
// 9 10
f.ProcessAttestation(t.Context(), []uint64{1}, indexToHash(2), 0, true)
f.ProcessAttestation(t.Context(), []uint64{1}, indexToHash(2), 1, true)
// With the additional vote to the right branch, the head should be 10:
// 0 <-- start


@@ -309,8 +309,8 @@ func (f *ForkChoice) updateBalances() error {
newBalance = newBalances[index]
}
// Update only if the validator's balance or vote has changed.
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance || vote.currentPayloadStatus != vote.nextPayloadStatus {
// Update only if the validator's voting slot has changed.
if vote.currentSlot != vote.nextSlot {
// Add new balance to the next vote target if the root is known.
pn, pending := f.store.resolveVoteNode(vote.nextRoot, vote.nextSlot, vote.nextPayloadStatus)
if pn != nil && vote.nextRoot != zHash {


@@ -93,9 +93,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0, 0, true, true},
{indexToHash(2), indexToHash(2), 0, 0, true, true},
{indexToHash(3), indexToHash(3), 0, 0, true, true},
{indexToHash(1), indexToHash(1), 1, 0, true, true},
{indexToHash(2), indexToHash(2), 2, 0, true, true},
{indexToHash(3), indexToHash(3), 3, 0, true, true},
}
// Each node gets one unique vote. The weight should look like 103 <- 102 <- 101 because
@@ -127,9 +127,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f.balances = []uint64{100, 100, 100}
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0, 0, true, true},
{indexToHash(2), indexToHash(2), 0, 0, true, true},
{indexToHash(3), indexToHash(3), 0, 0, true, true},
{indexToHash(1), indexToHash(1), 1, 0, true, true},
{indexToHash(2), indexToHash(2), 2, 0, true, true},
{indexToHash(3), indexToHash(3), 3, 0, true, true},
}
f.justifiedBalances = []uint64{10, 20, 30}
@@ -158,9 +158,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
{indexToHash(1), indexToHash(1), 0, 0, true, true},
{indexToHash(2), indexToHash(2), 0, 0, true, true},
{indexToHash(3), indexToHash(3), 0, 0, true, true},
{indexToHash(1), indexToHash(1), 1, 0, true, true},
{indexToHash(2), indexToHash(2), 2, 0, true, true},
{indexToHash(3), indexToHash(3), 3, 0, true, true},
}
f.justifiedBalances = []uint64{10, 20, 30}


@@ -40,6 +40,13 @@ func (f *ForkChoice) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool)
return pn.node.root, pn.full
}
// PayloadContentLookup returns the preferred lookup key for a given beacon block root.
// If full payload content wins, it returns the block hash and true.
// If empty payload content wins, it returns the beacon block root and false.
func (f *ForkChoice) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
return f.store.payloadContentLookup(root)
}
func (s *Store) resolveParentPayloadStatus(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, blockHash *[32]byte) error {
sb, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
@@ -292,6 +299,21 @@ func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
return en
}
func (s *Store) payloadContentLookup(root [32]byte) ([32]byte, bool) {
en := s.emptyNodeByRoot[root]
if en == nil || en.node == nil {
return [32]byte{}, false
}
pn := s.choosePayloadContent(en.node)
if pn == nil || pn.node == nil {
return [32]byte{}, false
}
if pn.full {
return pn.node.blockHash, true
}
return pn.node.root, false
}
// nodeTreeDump appends to the given list all the nodes descending from this one
func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
@@ -372,6 +394,8 @@ func (f *ForkChoice) InsertPayload(pe interfaces.ROExecutionPayloadEnvelope) err
children: make([]*Node, 0),
}
s.fullNodeByRoot[root] = fn
payloadInsertedCount.Inc()
updatePayloadNodeMetrics(s)
f.updateNewFullNodeWeight(fn)
return nil
}
@@ -391,6 +415,7 @@ func (f *ForkChoice) SetPTCVote(root [32]byte, ptcIdx uint64, payloadPresent, bl
if n == nil {
return
}
ptcVoteCount.Inc()
if payloadPresent {
n.node.setPayloadAvailabilityVote(ptcIdx)
}
@@ -428,6 +453,12 @@ func (s *Store) resolveVoteNode(r [32]byte, slot primitives.Slot, payloadStatus
return en, slot == en.node.slot
}
// HasFullNode returns true if a full (payload) node exists for the given beacon block root.
func (f *ForkChoice) HasFullNode(root [32]byte) bool {
_, ok := f.store.fullNodeByRoot[root]
return ok
}
// BlockHash returns the hash committed in the given block
func (f *ForkChoice) BlockHash(root [32]byte) ([32]byte, error) {
s := f.store
@@ -483,3 +514,23 @@ func (s *Store) removeProposerBoostFromParent() {
}
return
}
// FullHead returns the head root and the head block hash of the chain. It also
// returns whether the head is full, that is, whether the block hash belongs to the
// head slot itself rather than to an earlier slot.
func (f *ForkChoice) FullHead(ctx context.Context) ([32]byte, [32]byte, bool, error) {
hr, err := f.Head(ctx)
if err != nil {
return [32]byte{}, [32]byte{}, false, err
}
n := f.store.headNode
pn := f.store.choosePayloadContent(n)
if pn.full && slots.ToEpoch(n.slot) >= params.BeaconConfig().GloasForkEpoch {
return hr, pn.node.blockHash, true, nil
}
fullAncestor := f.store.fullParent(pn)
if fullAncestor == nil {
return hr, [32]byte{}, false, nil
}
return hr, fullAncestor.node.blockHash, false, nil
}


@@ -907,6 +907,46 @@ func TestChoosePayloadContent(t *testing.T) {
})
}
func TestPayloadContentLookup(t *testing.T) {
f := setupGloas(t, 0, 0)
ctx := t.Context()
rootA := indexToHash(1)
blockHashA := indexToHash(100)
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
t.Run("unknown root returns zero and false", func(t *testing.T) {
v, isBlockHash := f.PayloadContentLookup(indexToHash(999))
assert.Equal(t, [32]byte{}, v)
assert.Equal(t, false, isBlockHash)
})
t.Run("empty wins returns root", func(t *testing.T) {
v, isBlockHash := f.PayloadContentLookup(rootA)
assert.Equal(t, rootA, v)
assert.Equal(t, false, isBlockHash)
})
pe, err := prepareGloasForkchoicePayload(rootA)
require.NoError(t, err)
require.NoError(t, f.InsertPayload(pe))
t.Run("full wins returns block hash", func(t *testing.T) {
en := f.store.emptyNodeByRoot[rootA]
fn := f.store.fullNodeByRoot[rootA]
require.NotNil(t, en)
require.NotNil(t, fn)
en.weight = 1
fn.weight = 2
v, isBlockHash := f.PayloadContentLookup(rootA)
assert.Equal(t, blockHashA, v)
assert.Equal(t, true, isBlockHash)
})
}
func TestGloasForkedBranches(t *testing.T) {
f := setupGloas(t, 1, 1)
s := f.store
@@ -1399,3 +1439,160 @@ func TestCanonicalNodeAtSlot_NilParent(t *testing.T) {
assert.Equal(t, [32]byte{}, root)
assert.Equal(t, false, full)
}
func TestFullHead_FullPayload(t *testing.T) {
f := setupGloas(t, 0, 0)
ctx := t.Context()
zeroHash := params.BeaconConfig().ZeroHash
rootA := indexToHash(1)
blockHashA := indexToHash(100)
st, blk, err := prepareGloasForkchoiceState(ctx, 1, rootA, zeroHash, blockHashA, zeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
pe, err := prepareGloasForkchoicePayload(rootA)
require.NoError(t, err)
require.NoError(t, f.InsertPayload(pe))
driftGenesisTime(f, 1, 0)
require.NoError(t, f.NewSlot(ctx, 1))
hr, bh, full, err := f.FullHead(ctx)
require.NoError(t, err)
assert.Equal(t, rootA, hr)
assert.Equal(t, blockHashA, bh)
assert.Equal(t, true, full)
}
func TestFullHead_EmptyPayload(t *testing.T) {
f := setupGloas(t, 0, 0)
ctx := t.Context()
zeroHash := params.BeaconConfig().ZeroHash
// Insert block A at slot 1 with payload.
rootA := indexToHash(1)
blockHashA := indexToHash(100)
st, blk, err := prepareGloasForkchoiceState(ctx, 1, rootA, zeroHash, blockHashA, zeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
pe, err := prepareGloasForkchoicePayload(rootA)
require.NoError(t, err)
require.NoError(t, f.InsertPayload(pe))
// Insert block B at slot 2 without payload.
rootB := indexToHash(2)
blockHashB := indexToHash(101)
driftGenesisTime(f, 2, 0)
require.NoError(t, f.NewSlot(ctx, 2))
st, blk, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, blockHashA, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
// Head is B (empty), so blockHash should come from ancestor A.
hr, bh, full, err := f.FullHead(ctx)
require.NoError(t, err)
assert.Equal(t, rootB, hr)
assert.Equal(t, blockHashA, bh)
assert.Equal(t, false, full)
}
// TestFullHead_PreGloasBlock_ReturnsFalse verifies that FullHead returns full=false
// for pre-Gloas (Fulu) blocks even though the fork choice store internally creates a
// fullNodeByRoot entry with full=true for them (for EL optimistic validation tracking).
// Without the epoch guard in FullHead, isNewHead would spuriously fire every ticker
// tick because FullHead returned full=true while saveHead stored head.full=false.
func TestFullHead_PreGloasBlock_ReturnsFalse(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.GloasForkEpoch = 100 // Gloas activates far in the future; slot 1 is pre-Gloas
params.OverrideBeaconConfig(cfg)
f := setup(0, 0)
ctx := t.Context()
zeroHash := params.BeaconConfig().ZeroHash
rootA := indexToHash(1)
blockHashA := indexToHash(100)
st, blk, err := prepareForkchoiceState(ctx, 1, rootA, zeroHash, blockHashA, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
driftGenesisTime(f, 1, 0)
require.NoError(t, f.NewSlot(ctx, 1))
hr, _, full, err := f.FullHead(ctx)
require.NoError(t, err)
assert.Equal(t, rootA, hr)
assert.Equal(t, false, full, "pre-Gloas block must return full=false from FullHead")
}
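// The isNewHead/saveHead interaction described in the doc comment above is
// outside this diff. A hypothetical sketch of the comparison the epoch guard
// protects follows; the savedHead struct, the isNewHead helper and the package
// name are assumptions, and only FullHead's signature (also added to
// HeadRetriever later in this diff) comes from these changes.
//
//	package blockchain // package name assumed
//
//	import "context"
//
//	type savedHead struct {
//		root      [32]byte
//		blockHash [32]byte
//		full      bool
//	}
//
//	type fullHeadRetriever interface {
//		FullHead(context.Context) ([32]byte, [32]byte, bool, error)
//	}
//
//	func isNewHead(ctx context.Context, fc fullHeadRetriever, prev savedHead) (bool, error) {
//		root, blockHash, full, err := fc.FullHead(ctx)
//		if err != nil {
//			return false, err
//		}
//		// Without the GloasForkEpoch guard, FullHead would report full=true for a
//		// pre-Gloas head while the stored head keeps full=false, so this
//		// comparison would return true on every ticker tick.
//		return root != prev.root || blockHash != prev.blockHash || full != prev.full, nil
//	}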
func TestUpdateBalances_SlotChangeMovesBalance(t *testing.T) {
f := setupGloas(t, 1, 1)
ctx := t.Context()
zeroHash := params.BeaconConfig().ZeroHash
// Insert block B at slot 100 and block C at slot 101.
slotB := primitives.Slot(100)
rootB := indexToHash(1)
blockHashB := indexToHash(100)
driftGenesisTime(f, slotB, 0)
st, blk, err := prepareGloasForkchoiceState(ctx, slotB, rootB, zeroHash, blockHashB, zeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
slotC := primitives.Slot(101)
rootC := indexToHash(2)
blockHashC := indexToHash(200)
driftGenesisTime(f, slotC, 0)
// Use zeroHash as parentBlockHash so C builds on B's empty node (no full node needed).
st, blk, err = prepareGloasForkchoiceState(ctx, slotC, rootC, rootB, blockHashC, zeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
s := f.store
validatorBalance := uint64(32000000000)
f.justifiedBalances = []uint64{validatorBalance}
// Step 1: Validator attests for block B at slot 100 (same slot as block) with payloadStatus=false.
// resolveVoteNode(B, 100, false) → pending = (100 == 100) = true → Node.balance.
f.votes = []Vote{
{currentRoot: zeroHash, nextRoot: rootB, nextSlot: slotB, currentSlot: 0, nextPayloadStatus: false, currentPayloadStatus: false},
}
require.NoError(t, f.updateBalances())
emptyB := s.emptyNodeByRoot[rootB]
require.NotNil(t, emptyB)
assert.Equal(t, validatorBalance, emptyB.node.balance, "balance should be in Node.balance (pending)")
assert.Equal(t, uint64(0), emptyB.balance, "PayloadNode.balance should be zero")
// Step 2: Validator re-attests for the same block B but at slot 140 (new epoch, different slot).
// payloadStatus and root are unchanged, only the slot changes.
laterSlot := primitives.Slot(140)
f.votes[0].nextSlot = laterSlot
// nextRoot is still B, nextPayloadStatus is still false.
// Step 3: updateBalances should detect the slot change and reprocess.
// It should subtract from Node.balance (pending, old slot 100==100) and
// add to PayloadNode.balance (non-pending, new slot 140!=100).
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(0), emptyB.node.balance, "Node.balance should be zero after slot change moved balance out")
assert.Equal(t, validatorBalance, emptyB.balance, "balance should have moved to PayloadNode.balance (non-pending)")
// Step 4: Validator switches vote to block C at slot 101.
// The subtract from B should now correctly target PayloadNode.balance (non-pending).
f.votes[0].nextRoot = rootC
f.votes[0].nextSlot = slotC
require.NoError(t, f.updateBalances())
// B's balances should both be zero (balance was correctly subtracted).
assert.Equal(t, uint64(0), emptyB.node.balance, "Node.balance should remain zero")
assert.Equal(t, uint64(0), emptyB.balance, "PayloadNode.balance should be zero after vote moved to C")
// C should have received the balance.
emptyC := s.emptyNodeByRoot[rootC]
require.NotNil(t, emptyC)
// slot 101 == C.node.slot(101) → pending=true → Node.balance
assert.Equal(t, validatorBalance, emptyC.node.balance, "C should have the validator's balance")
}
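
The routing this test exercises follows from resolveVoteNode's pending return value (slot == en.node.slot, shown earlier) and from the Node.balance / PayloadNode.balance assertions above. The sketch below is written as if inside the forkchoice package; addVoteBalance and its amount parameter are assumed names, not the actual updateBalances code.

```go
// addVoteBalance is an illustrative helper showing where a vote's weight lands.
func addVoteBalance(s *Store, root [32]byte, voteSlot primitives.Slot, payloadStatus bool, amount uint64) {
	pn, pending := s.resolveVoteNode(root, voteSlot, payloadStatus)
	if pn == nil || pn.node == nil {
		return
	}
	if pending {
		// The vote targets the same slot as the block, so the payload verdict
		// is not settled yet and the weight accrues to the shared Node.
		pn.node.balance += amount
		return
	}
	// A later-slot vote counts toward the specific payload content (empty or
	// full) that the attester saw.
	pn.balance += amount
}
```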


@@ -48,4 +48,33 @@ var (
Help: "The number of times pruning happened.",
},
)
payloadInsertedCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "forkchoice_payload_inserted_count",
Help: "The number of payloads inserted into forkchoice.",
},
)
payloadEmptyNodeCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "forkchoice_payload_empty_node_count",
Help: "The number of empty payload nodes currently tracked in forkchoice.",
},
)
payloadFullNodeCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "forkchoice_payload_full_node_count",
Help: "The number of full payload nodes currently tracked in forkchoice.",
},
)
ptcVoteCount = promauto.NewCounter(
prometheus.CounterOpts{
Name: "forkchoice_ptc_vote_count",
Help: "The number of PTC votes recorded by forkchoice.",
},
)
)
func updatePayloadNodeMetrics(s *Store) {
payloadEmptyNodeCount.Set(float64(len(s.emptyNodeByRoot)))
payloadFullNodeCount.Set(float64(len(s.fullNodeByRoot)))
}


@@ -143,5 +143,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, inva
}
delete(s.emptyNodeByRoot, pn.node.root)
}
updatePayloadNodeMetrics(s)
return invalidRoots, nil
}


@@ -63,7 +63,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{0}, newRoot, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, []uint64{0}, newRoot, slot, true)
headRoot, err = f.Head(ctx)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 1")
@@ -89,7 +89,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{1}, newRoot, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, []uint64{1}, newRoot, slot, true)
headRoot, err = f.Head(ctx)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 2")
@@ -117,7 +117,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{2}, newRoot, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, []uint64{2}, newRoot, slot, true)
headRoot, err = f.Head(ctx)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
@@ -146,7 +146,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.ProcessAttestation(ctx, []uint64{3}, newRoot, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, []uint64{3}, newRoot, slot, true)
headRoot, err = f.Head(ctx)
require.NoError(t, err)
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
@@ -176,7 +176,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// Regression: process attestations for C, check that it
// becomes head, we need two attestations to have C.weight = 30 > 24 = D.weight
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, []uint64{4, 5}, indexToHash(3), slot+1, true)
headRoot, err = f.Head(ctx)
require.NoError(t, err)
assert.Equal(t, indexToHash(3), headRoot, "Incorrect head for justified epoch at slot 4")
@@ -237,10 +237,10 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// The maliciously withheld block has one vote.
votes := []uint64{1}
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, maliciouslyWithheldBlockSlot, true)
// The honest block has one vote.
votes = []uint64{2}
f.ProcessAttestation(ctx, votes, honestBlock, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, votes, honestBlock, honestBlockSlot, true)
// Ensure the head is STILL C, the honest block, as the honest block had proposer boost.
r, err = f.Head(ctx)
@@ -306,7 +306,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// An attestation is received for B that has more voting power than C with the proposer boost,
// allowing B to then become the head if their attestation has enough adversarial votes.
votes := []uint64{1, 2}
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, votes, maliciouslyWithheldBlock, maliciouslyWithheldBlockSlot, true)
// Expect the head to have switched to B.
r, err = f.Head(ctx)
@@ -381,7 +381,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// An attestation for C is received at slot N+3.
votes := []uint64{1}
f.ProcessAttestation(ctx, votes, c, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, votes, c, cSlot, true)
// A block D, building on B, is received at slot N+3. It should not be able to win without boosting.
dSlot := primitives.Slot(3)
@@ -421,7 +421,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
votes = []uint64{2}
f.ProcessAttestation(ctx, votes, d2, primitives.Slot(fEpoch), true)
f.ProcessAttestation(ctx, votes, d2, dSlot, true)
// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx)
require.NoError(t, err)


@@ -26,7 +26,7 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, blk.Root(), 0, true)
f.ProcessAttestation(ctx, attesters, blk.Root(), 1, true)
orphanLateBlockFirstThreshold := time.Duration(params.BeaconConfig().SecondsPerSlot/params.BeaconConfig().IntervalsPerSlot) * time.Second
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
@@ -124,7 +124,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, blk.Root(), 0, true)
f.ProcessAttestation(ctx, attesters, blk.Root(), 1, true)
driftGenesisTime(f, 3, 1*time.Second)
childRoot := [32]byte{'b'}


@@ -163,6 +163,7 @@ func (s *Store) insert(ctx context.Context,
} else {
delete(s.emptyNodeByRoot, root)
delete(s.fullNodeByRoot, root)
updatePayloadNodeMetrics(s)
return nil, errInvalidParentRoot
}
} else {
@@ -196,6 +197,7 @@ func (s *Store) insert(ctx context.Context,
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.emptyNodeByRoot)))
updatePayloadNodeMetrics(s)
// Only update received block slot if it's within epoch from current time.
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
@@ -235,6 +237,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
fn.children = nil
delete(s.fullNodeByRoot, node.root)
}
updatePayloadNodeMetrics(s)
return nil
}


@@ -243,6 +243,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// Set the f.justifiedBalances of the last 2 validators to 0.
f.justifiedBalances = []uint64{1, 1, 1, 0, 0}
f.ProcessAttestation(t.Context(), []uint64{3, 4}, indexToHash(9), 6*params.BeaconConfig().SlotsPerEpoch, true)
// The head should be back to 10.
r, err = f.Head(t.Context())
require.NoError(t, err)
@@ -250,6 +251,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// Set the f.justifiedBalances back to normal.
f.justifiedBalances = []uint64{1, 1, 1, 1, 1}
f.ProcessAttestation(t.Context(), []uint64{3, 4}, indexToHash(9), 7*params.BeaconConfig().SlotsPerEpoch, true)
// The head should be back to 9.
r, err = f.Head(t.Context())
require.NoError(t, err)
@@ -257,6 +259,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// Remove the last 2 validators.
f.justifiedBalances = []uint64{1, 1, 1}
f.ProcessAttestation(t.Context(), []uint64{3, 4}, indexToHash(9), 8*params.BeaconConfig().SlotsPerEpoch, true)
// The head should be back to 10.
r, err = f.Head(t.Context())
require.NoError(t, err)


@@ -39,6 +39,7 @@ type RLocker interface {
// HeadRetriever retrieves head root and optimistic info of the current chain.
type HeadRetriever interface {
Head(context.Context) ([32]byte, error)
FullHead(context.Context) ([32]byte, [32]byte, bool, error)
GetProposerHead() [32]byte
CachedHeadRoot() [32]byte
}
@@ -71,6 +72,7 @@ type Getter interface {
type FastGetter interface {
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
FinalizedPayloadBlockHash() [32]byte
HasFullNode([32]byte) bool
HasNode([32]byte) bool
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
@@ -95,6 +97,7 @@ type FastGetter interface {
ParentRoot(root [32]byte) ([32]byte, error)
BlockHash(root [32]byte) ([32]byte, error)
CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool)
PayloadContentLookup(root [32]byte) ([32]byte, bool)
}
// Setter allows to set forkchoice information

Some files were not shown because too many files have changed in this diff.