Compare commits

...

309 Commits

Author SHA1 Message Date
Aarsh Shah
e8480a86af only send header once per peer per group and fix tests for a green race detector 2026-03-31 20:01:49 +05:30
Aarsh Shah
616617e300 changes as per review 2026-03-31 09:38:54 +05:30
Aarsh Shah
28acdfef9f dont construct partial columns if partial is disabled and fix logging 2026-03-26 17:57:54 +05:30
Aarsh Shah
0678ad3e27 fix log 2026-03-26 17:12:24 +05:30
Aarsh Shah
96b6512ebb clean up logs 2026-03-26 17:10:24 +05:30
Aarsh Shah
f853cabe0f include cells and proofs 2026-03-24 12:01:52 +05:30
Aarsh Shah
aa8b95c33c eager push all cells for block proposer 2026-03-23 20:01:30 +05:30
Aarsh Shah
f9e1ddcd0c de-dup find peers for full and partial columns 2026-03-19 18:42:53 +05:30
Aarsh Shah
476d5b0399 pass root to partial data column construction 2026-03-19 15:42:00 +05:30
Aarsh Shah
d07a0b44be changes as per code review 2026-03-19 15:04:28 +05:30
Aarsh Shah
c6442dd070 reject if groupID is invalid 2026-03-19 15:02:04 +05:30
Aarsh Shah
7932a88f33 type for return value of ComputeCellsAndProofsFromStructured 2026-03-19 12:50:34 +05:30
Aarsh Shah
bb08e7e108 revert changes to verifyKzgCommitmentsInclusionProof 2026-03-19 12:36:31 +05:30
Aarsh Shah
fb991b6ac6 remove ExtendFromVerifiedCells method 2026-03-19 11:58:38 +05:30
Aarsh Shah
c781bdfb85 rename p2p_pubsub_rpc_recv_pub_size_total and p2p_pubsub_rpc_drop_pub_size_total 2026-03-19 11:49:54 +05:30
Aarsh Shah
ab207b5086 refactor extractColumnIndexFromTopic 2026-03-19 11:43:17 +05:30
Aarsh Shah
7ca578c0cb remove TODO 2026-03-19 11:41:06 +05:30
Aarsh Shah
cbdd2e3076 remove whitespace bloating 2026-03-19 11:35:35 +05:30
Aarsh Shah
78115c7bcc remove TODO 2026-03-19 11:31:36 +05:30
Aarsh Shah
57c55055d7 start method for broadcaster 2026-03-19 11:30:01 +05:30
Aarsh Shah
5ccd111168 refactor handle incoming rpc 2026-03-18 15:21:21 +05:30
Aarsh Shah
f5e8119a39 add logging helpers 2026-03-18 13:28:58 +05:30
Aarsh Shah
750f54e70b rename to ErrSidecarParentUnknown 2026-03-18 10:20:30 +05:30
Aarsh Shah
3a081cc362 reverse type cast conversion 2026-03-18 10:17:38 +05:30
Aarsh Shah
64f4c524bf remove typecast that is not needed 2026-03-17 19:38:38 +05:30
Aarsh Shah
7408a4e26e improve cells to send for peer implementation 2026-03-17 19:33:06 +05:30
Aarsh Shah
e04b149bf0 make callbacks methods on the broadcaster 2026-03-17 19:24:28 +05:30
Aarsh Shah
69b02d7352 decode parts metadata correctly 2026-03-17 19:09:44 +05:30
Aarsh Shah
8fd850d68c Revert "change parse to decode"
This reverts commit 3fe9ce5e50.
2026-03-17 18:59:00 +05:30
Aarsh Shah
3fe9ce5e50 change parse to decode 2026-03-17 18:42:21 +05:30
Aarsh Shah
4d106477b8 use interface for broadcaster helpers 2026-03-17 18:07:13 +05:30
Aarsh Shah
00dae3670d make column handling methods an interface 2026-03-17 18:02:21 +05:30
Aarsh Shah
62b6439d37 release semaphore immediately 2026-03-17 17:28:38 +05:30
Aarsh Shah
58834e4ba6 use config var for DataColumnSidecarSubnetCount 2026-03-17 17:27:30 +05:30
Aarsh Shah
9240158d7e some fixes 2026-03-16 17:37:15 +05:30
Marco Munizaga
2f587d713a Update partial message usage 2026-03-12 16:01:43 -07:00
Marco Munizaga
06fbfa5ab8 Update gossipsub dep 2026-03-12 15:11:34 -07:00
Aarsh Shah
2a2d013c9b fix handling of batch 2026-03-12 13:30:42 +05:30
Aarsh Shah
645ff9c143 batch verify kzg cells for partial columns 2026-03-12 13:09:08 +05:30
Aarsh Shah
4b3871b98a improve broadcasting of partial columns 2026-03-12 11:11:38 +05:30
Aarsh Shah
3b06bd7605 better error handling 2026-03-11 16:34:29 +05:30
Aarsh Shah
90630ae336 return if broadcaster is stopped 2026-03-11 16:20:42 +05:30
Aarsh Shah
23921f9a2e last set of improvements 2026-03-11 16:11:54 +05:30
Aarsh Shah
f851d381cb republish for data column sidecars and partial columns as well 2026-03-11 14:13:49 +05:30
Aarsh Shah
c406c5c030 fix conflicts 2026-03-11 12:18:23 +05:30
Kasey Kirkham
5995381adf squash me - logger plumbing fix 2026-03-10 00:13:19 -05:00
Kasey Kirkham
f429aa6c81 lint 2026-03-09 22:56:16 -05:00
Aarsh Shah
8e9c476aa8 (16465) unify full/partial validation pipelines 2026-03-09 22:45:31 -05:00
Aarsh Shah
462ef6265a (16433) test partial column validation, broadcast 2026-03-09 22:45:22 -05:00
Aarsh Shah
a598c26443 (16364) ssz partial column metadata encoding 2026-03-09 22:45:10 -05:00
Aarsh Shah
a122010a2d (16324) process eager partial data column headers 2026-03-09 22:44:51 -05:00
Marco Munizaga
827e9d7708 Partial columns addendum (#16327)
See commits for details; best reviewed commit by commit.
2026-03-09 21:37:35 -05:00
Aarsh Shah
5136619cf7 fix partial columns broadcast (#16321)
Fix the data column broadcast logic for partial and full columns.
2026-03-09 21:37:16 -05:00
Aarsh Shah
db4c5a2793 fix lint 2026-03-09 21:35:58 -05:00
Aarsh Shah
0bd1d4ae84 fix bazel 2026-03-09 21:35:47 -05:00
Aarsh Shah
73ca00c26f fix log files CI error 2026-03-09 21:35:39 -05:00
Aarsh Shah
1390a177ed fix CI 2026-03-09 21:35:32 -05:00
Aarsh Shah
b4244b735f fix CI 2026-03-09 21:35:25 -05:00
Aarsh Shah
07f42d4cbf fix lint 2026-03-09 21:35:12 -05:00
Aarsh Shah
c2c1f0ab19 fix CI 2026-03-09 21:35:06 -05:00
Aarsh Shah
77610ee212 go mod tidy and partial columns 2026-03-09 21:34:58 -05:00
Marco Munizaga
45f97a274c add todos from call
This came from a call with Aarsh and Kasey.
2026-03-09 21:34:28 -05:00
Marco Munizaga
75d7074548 Implement gossipsub_mesh_peers metric 2026-03-09 21:34:20 -05:00
Marco Munizaga
df3a214809 Track number of peers in mesh
along with tracking which peers request partial messages
2026-03-09 21:34:07 -05:00
Marco Munizaga
69561c7d43 update go-libp2p-pubsub with new tracer
The new tracer interface provides the peer ID in tracer.RecvRPC.
2026-03-09 21:33:56 -05:00
Marco Munizaga
7659ed2bb2 add todo 2026-03-09 21:33:50 -05:00
Marco Munizaga
66a967e9ba return valid if there are no datacolumns to validate 2026-03-09 21:33:39 -05:00
Marco Munizaga
12f67cb2c5 cache partial data column header by group ID 2026-03-09 21:33:27 -05:00
Marco Munizaga
7aaf47a774 Add partial message metrics 2026-03-09 21:33:17 -05:00
Marco Munizaga
0bfb19e0ea more context around errors 2026-03-09 21:33:09 -05:00
Marco Munizaga
a930ec21ad fix test typo 2026-03-09 21:32:37 -05:00
Marco Munizaga
583deedfbd eagerly push the partial data column header 2026-03-09 21:32:22 -05:00
Marco Munizaga
419aafd7b3 Include the version byte in the group ID 2026-03-09 21:31:43 -05:00
Marco Munizaga
1e485d9e5e add partial data column header 2026-03-09 21:31:31 -05:00
Marco Munizaga
0bba8cac47 Update go-libp2p-pubsub 2026-03-09 21:31:24 -05:00
Marco Munizaga
fb0dd6f927 add todo 2026-03-09 21:31:15 -05:00
Marco Munizaga
3620fe9b7c add partial-data-columns flag 2026-03-09 21:31:06 -05:00
Marco Munizaga
bc6db507e2 beacon-chain/sync: subscribe to partial columns 2026-03-09 21:30:42 -05:00
Marco Munizaga
7d29866789 publish partial columns when proposing a block 2026-03-09 21:30:19 -05:00
Marco Munizaga
40190ffe69 beacon-chain/execution: return partial columns and use getBlobsV3
... if available
2026-03-09 21:27:17 -05:00
Marco Munizaga
16ccdfaa69 core/peerdas: Add PartialColumns helper 2026-03-09 21:25:22 -05:00
Marco Munizaga
d3c0d456b0 core/peerdas: support partial responses 2026-03-09 21:25:08 -05:00
Marco Munizaga
7650dff057 beacon-chain/p2p: own and start PartialColumnBroadcaster 2026-03-09 21:24:58 -05:00
Marco Munizaga
a555eb0245 Add metrics for gossipsub message sizes 2026-03-09 21:24:38 -05:00
Marco Munizaga
67b1e21ffc Implement PartialColumnBroadcaster 2026-03-09 21:24:23 -05:00
Marco Munizaga
aa72fb21bb refactor DataColumn Cell KZG Proof verification 2026-03-09 21:24:12 -05:00
Marco Munizaga
26766b90c3 avoid needless copy 2026-03-09 21:24:04 -05:00
Marco Munizaga
864e97ca30 Add PartialDataColumn type 2026-03-09 21:23:56 -05:00
Marco Munizaga
dadf5fe459 proto: Add PartialDataColumnSidecar 2026-03-09 21:23:42 -05:00
Marco Munizaga
8a5fbf2e90 clone slice in testing util 2026-03-09 21:23:30 -05:00
Marco Munizaga
d624f89ad1 logrusadapter for slog 2026-03-09 21:23:20 -05:00
Marco Munizaga
e751d0bc2b fix multiaddr comparison 2026-03-09 21:23:13 -05:00
Marco Munizaga
f90d31eeac deps: update libp2p deps
for partial message support and simnet support
2026-03-09 21:22:58 -05:00
james-prysm
dec4b43b3e fix to attestation pool to only consume on insert (#16503)
**What type of PR is this?**
Bug fix

**What does this PR do? Why is it needed?**

Fixes a bug where the payload attestation pool was consumed on reads, when we should only prune on insert.

2026-03-10 00:02:52 +00:00
james-prysm
17ea45a011 adding the duties stores for validator duties processing (#16479)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

This PR refactors the way we store different validator duties into a duties store, for easier splitting of tasks and, in a future PR, for processing duties for split endpoints. This PR will reduce the number of changes when we start calling the different endpoints introduced in https://github.com/OffchainLabs/prysm/pull/16416.

This PR is part of https://github.com/OffchainLabs/prysm/pull/16421.

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2026-03-09 21:44:14 +00:00
terence
1934afac73 sync: wire payload attestations into pending pool (#16492)
This PR wires payload attestations arriving over the sync gossip pipeline into the pending pool.
2026-03-09 18:32:10 +00:00
james-prysm
d0c9a31657 Validator client ptc actions (#16407)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

This PR introduces PTC attestations, including getting the attestation data, signing, and submitting. The getting and submitting are stubbed out for now, as the APIs are not implemented/merged yet.

Tested in Kurtosis to verify no regressions.

2026-03-09 16:39:47 +00:00
terence
66c70200ee duties: wire PTC slot assignments into validator duty pipeline (#16489)
This PR adds end-to-end PTC support for validator duties by adding PTC
duty support in duties v2:
  - introduced repeated PTC slot assignments per validator
  - computed assignments by scanning epoch slots and checking `gloas.PayloadCommittee`
  - propagated PTC slots through server/adapter duty responses
2026-03-09 14:20:13 +00:00
Aarsh Shah
efe2bce182 check for stop in publish 2026-03-09 14:40:36 +04:00
Aarsh Shah
cdd169197a pick up marco's fixes 2026-03-09 12:28:44 +04:00
Aarsh Shah
609d382cbf remove getBlobsCalled param 2026-03-09 11:53:35 +04:00
Marco Munizaga
01ec77c3c5 don't gitignore the execution directory 2026-03-09 11:28:15 +04:00
Aarsh Shah
361c423d78 revert removal of logrus adaptor 2026-03-09 11:21:57 +04:00
Bastin
928a874e4a add log.go files to runtime (#16491)
**What does this PR do? Why is it needed?**
This PR adds the `runtime` package and its subpackages (except `runtime/logging`) to the log generation process, which means they get an additional `log.go` file with the package path as a field.

**Other notes for review**
- `runtime/logging` is an exception because adding it to the log gen process messes with the internals of the package, since it handles the logging formats, etc.

- to test this, you can look at the logs emitted from these packages before and after this PR. They will have a `prefix` field before this PR, and no `prefix` after. The most obvious one is the runtime (`registry`) package itself, which usually emits a log at the start and end of the beacon node.

This is part of my goal to remove all `prefix` logs and move them to the new `package_path` format.
2026-03-08 21:09:40 +00:00
Bastin
ed8a3351aa support for calling debug/states/{id} with pre-payload roots (#16466)
In `rpc/lookup/stater`, when trying to fetch a state using a state root, we only look in `state.StateRoots`. In Gloas that would only account for post-payload roots.

This PR adds a non-optimal way to support fetching states using pre-payload state roots (Dora does this).

I'm running a local devnet so I can see how long it takes for the stater to respond in the worst case, but I think it should be fine for the beacon API.
2026-03-08 19:51:44 +00:00
terence
5b95d11c5e rpc: allow execution payload envelope lookup by block ID (#16495)
fixes #[16494](https://github.com/OffchainLabs/prysm/issues/16494)
2026-03-08 14:00:52 +00:00
terence
7a4bea0e44 rpc/validator: implement PayloadAttestationData and SubmitPayloadAttestation gRPC endpoints (#16487)
- This PR adds the payload attestation prototype flow. 
- On the proto side it introduces `PayloadAttestationDataRequest` and
two RPC methods on `BeaconNodeValidator`: `PayloadAttestationData` and
`SubmitPayloadAttestation`.
- The `ForkchoiceFetcher` interface is extended with
`HighestReceivedBlockRoot() [32]byte`, which returns the beacon block
root at the highest received slot and is used to determine which block
PTC members attest to, and `HasFullNode([32]byte) bool`, which indicates
whether a full payload node exists for a given root and determines the
`PayloadPresent` field in the response.
- The RPC server implementation lives in
`beacon-chain/rpc/prysm/v1alpha1/validator/payload_attestation.go`.
`PayloadAttestationData` validates the current slot, checks sync status,
and returns attestation data using the forkchoice helpers, while
`SubmitPayloadAttestation` validates the request, broadcasts it over
P2P, applies it locally through `ReceivePayloadAttestationMessage`, and
inserts it into the payload attestation pool. Wiring adds
`PayloadAttestationPool` and `PayloadAttestationReceiver` to `Server`,
`rpc.Config`, and `BeaconNode`, with the pool instantiated in
`node.New()`.
- Both RPCs are marked `deprecated = true` since this prototype will
eventually move to the beacon REST API.
2026-03-07 04:57:28 +00:00
terence
e899003973 operations/payloadattestation: add PTC attestation pool (#16486)
Introduces the `payloadattestation` pool package, a thread-safe store
for aggregating `PayloadAttestationMessage`s from PTC members before
they are included in blocks by the `slot N+1` proposer.

PTC validators broadcast `PayloadAttestationMessage`s during slot N
attesting whether the execution payload arrived on time
(`PayloadPresent`) and whether blob data is available
(`BlobDataAvailable`). The slot N+1 proposer collects these and includes
aggregated `PayloadAttestation`s in the block body.

The pool:
- Keys entries by struct — one entry per distinct `(block_root, slot,
payload_present, blob_data_available)` tuple
- Aggregates BLS signatures and bit vectors as messages arrive
- Exposes `PendingPayloadAttestations(slot)` for proposer block construction; entries at that slot and lower are automatically pruned
2026-03-06 22:50:07 +00:00
james-prysm
e751a74c64 gloas attestation (#16391)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

Adds basic changes for Gloas attestations to have an updated index value based on payload availability.

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 19:44:21 +00:00
satushh
6826e77539 Use copy() instead of byte-by-byte loop in MarshalSSZ (#16483)
**What type of PR is this?**

Optimisation

**What does this PR do? Why is it needed?**

Use copy() instead of byte-by-byte loop in MarshalSSZ
Similar to https://github.com/OffchainLabs/prysm/pull/16222
2026-03-06 18:01:55 +00:00
Jun Song
15c178ef0c Add Close() call for resources (http response, file descriptor) in e2e package (#16481)
**What type of PR is this?**

> Bug fix

**What does this PR do? Why is it needed?**

Some resources in the e2e package weren't closed after use.

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2026-03-06 17:39:25 +00:00
Jun Song
e115137591 Fix file descriptor leaks in CopyFile (#16480)
**What type of PR is this?**

> Bug fix

**What does this PR do? Why is it needed?**

Close file descriptors for both the source and destination files in the `CopyFile` function.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-06 17:33:58 +00:00
Manu NALEPA
dc62271ebb Use InitializeFromProtoUnsafeXXX instead of InitializeFromProtoXXX when possible. (Nishant's optimization) (#16420)
**What type of PR is this?**
Optimisation

**What does this PR do? Why is it needed?**
Use `InitializeFromProtoUnsafeXXX` instead of `InitializeFromProtoXXX`
when possible. (@nisdas' change)
This change avoids useless copies.

The difference between these two methods is that the latter copies the input state. However, if the input state is guaranteed not to be modified after the use of this function (and, in particular, if the input state is not used at all afterwards), then copying it is unnecessary. ==> Using the "unsafe" method is fine in this case.

**Other notes for review**
As a reviewer, you should ensure that when the unsafe method is used,
then the input state is never modified after the use of the function.

This PR is much easier to review directly in VSCode, because it displays
more context by default.

2026-03-06 15:46:34 +00:00
Manu NALEPA
3ec505bc22 Periodically evict finalized states in checkpoint states cache (#16458)
**What type of PR is this?**
Bug fix

**What does this PR do? Why is it needed?**
This PR evicts finalized states in checkpoint states cache.

States are efficiently stored in caches, especially thanks to multi
value slices. If 1 state takes 300 MB and 2 states that are really
similar are stored in a cache, then these 2 states could only need let's
say 310MB of cache memory (instead of 300 MB x 2 = 600 MB).

**Before the commit creating the memory issue**, new states were
regularly stored in the `CheckpointStateCache`. This cache has 10 slots.
After this cache is full, oldest values (not quite exactly because it's
a LRU cache) are pruned.

**After the commit creating the memory issue**, new states are quite
rarely inserted into this cache. For example, on a run, almost 5H (!)
were needed before the first value was evicted from this cache. This
mean this cache contains multiple states that do not share a lot of
values with other states in all other caches/head.
==> A lot of fields stay in memory that are exclusively needed for the
(old) states only present in this cache.

The beacon node now evicts finalized states from the cache.

**Which issues(s) does this PR fix?**
- https://github.com/OffchainLabs/prysm/issues/16376

2026-03-06 09:39:37 +00:00
Potuz
6be77c0194 Do not reject on blocks unless they are provably bad (#16471)
Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-06 02:20:11 +00:00
Potuz
03fa7042cb Dont fail seting fc (#16478)
Do not fail when setting up the forkchoice checkpoints. Balances will be wrong, but they self-heal within 2 epochs.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-06 00:13:40 +00:00
terence
c620f29aab fix(sync): do not fail block processing on hot state DB cleanup error (#16473)
During initial sync, `checkSaveHotStateDB` can fail when it tries to
disable hot-state-saving mode and clean up previously saved states. If
one of those states has since become the finalized checkpoint,
`DeleteStates` returns `ErrDeleteJustifiedAndFinalized`, which is
correct behavior from the DB layer, but should not kill block
processing.
This was observed on epbs-devnet-0 at slot ~3742 where the error
cascaded as:
```
Skip processing Gloas blocks error=receive Gloas block at slot 3742: check save hot state db: cannot delete finalized block or state
```
The fix downgrades the error to a log, matching the pattern already used by `prunePostBlockOperationPools` two lines above. In the long run, we should remove this scheme of saving unfinalized states in the DB when we don't finalize for some epochs; it was a workaround for the Medalla incident.
2026-03-05 18:42:07 +00:00
terence
7a652d7ec6 fix(sync): preserve the full signed execution payload envelope (#16474)
fix(sync): preserve the full signed execution payload envelope in pubsub
validations
2026-03-05 15:24:23 +00:00
Aarsh Shah
9199377539 fix bazel build 2026-03-05 15:51:45 +04:00
Aarsh Shah
707b6dad06 add docs 2026-03-05 15:48:11 +04:00
Aarsh Shah
4ad1f87429 changes based on self review 2026-03-05 15:31:19 +04:00
Potuz
b59a830dce Gloas/safer pending signature (#16470)
This PR hardens the pending payload structure in a few ways:

- It quickly ignores envelopes before signature verification if we already have many pending payloads.
- It ignores rather than rejects payloads for the self-built block if they fail verification; this is because, as on devnet-zero, we may get gossip payloads from unknown branches.
- It adds a counter to prevent being DoSed via the previous feature. Since we are ignoring instead of rejecting those payloads, it only allows 3 such payloads to come in. This counter is pruned on finalization.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-05 04:36:57 +00:00
Potuz
6999943c3d Gloas/late block tasks right fcu (#16454)
This PR changes how we update the next slot cache on late blocks. 

- When the latest saved state in the NSC agrees with the head root, we update this latest state because it will be useful. Otherwise we instead update the head state and put it as latest.
- This also keys the insertion in the NSC post-Gloas with the right block hash instead of the block root if the head state is based on full. This exploits a dangerous feature of `IsParentBlockFull` documented in its godoc.

This PR also updates the cache and the epoch boundary when we process
the payload.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-05 00:55:07 +00:00
Potuz
90064edd54 Do not request blocks that are being synced (#16468)
When adding a payload to the pending queue, do not send a batch request
if the block is already in forkchoice or being synced.

The same applies when processing pending attestations.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2026-03-05 00:06:40 +00:00
james-prysm
393eb1e83c Reorganize duties functions (#16457)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

Just moving some functions around to reduce the duties split PR. Part of https://github.com/OffchainLabs/prysm/pull/16421.

2026-03-04 20:47:22 +00:00
Potuz
d2bcf75c50 Gloas/pending payload dependent root (#16461)
Only verify the envelope's signature before putting it in the queue if we are not self-building or the proposer is in the lookahead. Otherwise just ignore.

This can still cause a false rejection in a very edge case: a builder that is chosen right after becoming active while we are syncing from afar.
---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-04 18:40:11 +00:00
terence
89d3a6c66f Preserve canonical order in writeBlockBatchToStream (#16463)
Summary
- update `writeBlockBatchToStream` to preserve canonical slot order when
a batch contains both blinded and full blocks
- reconstruct blinded blocks before streaming and substitute them by
slot during canonical write pass
2026-03-04 18:17:25 +00:00
Bastin
bf2485eb71 gloas support for debug/beacon/state/ api (#16296)
**What does this PR do? Why is it needed?**
gloas support for debug/beacon/state/ api
2026-03-04 16:41:32 +00:00
terence
a88f60f1fa Apply gloas payload state mutations during stategen replay (#16464)
State replay in gloas needs to reflect post-payload state even when
replay input is a blinded envelope format. Without applying these
mutations during stategen replay, reconstructed states can diverge from
the canonical post-payload state. At the same time, not every block has
an envelope, so replay must remain robust when envelope data is absent

Summary
- apply Gloas execution payload envelope state mutations during stategen
replay when a blinded envelope is available
- extract reusable payload-mutation logic into
`ApplyExecutionPayloadStateMutations` and use it from both normal
payload processing and stategen replay
2026-03-04 16:26:04 +00:00
Potuz
33f899506f Gloas/request parent payload (#16418)
This PR adds sync paths to request by RPC the parent Payload in the
event that we know the incoming block is based on full and we do not
have the payload.

It also adds an invalid payload cache to the blockchain package and keeps track of some invalid payloads that we have seen.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-04 15:14:36 +00:00
Aarsh Shah
04340407a5 Merge remote-tracking branch 'origin/tests/tests-for-partial-broadcaster' into fix/unify-validation-pipeline 2026-03-04 18:22:49 +04:00
Aarsh Shah
41056b0828 Merge remote-tracking branch 'origin/feat/remove-logrus-changes' into tests/tests-for-partial-broadcaster 2026-03-04 18:21:40 +04:00
Aarsh Shah
b1cbb5474b Merge remote-tracking branch 'origin/feat/update-gossipsub-update-new-partial-API' into feat/remove-logrus-changes 2026-03-04 18:20:59 +04:00
Aarsh Shah
bde83a2f3f more logging and bazel fixes 2026-03-04 17:58:49 +04:00
Aarsh Shah
eadfb59015 fixes and tests 2026-03-04 17:08:22 +04:00
Aarsh Shah
43cf25660e bazel fixes and validation pipeline 2026-03-04 13:29:59 +04:00
Potuz
f7ead02e6e Make Dora Happy (#16441)
- This PR adds the execution_payload_envelope Beacon API endpoint supporting both JSON and SSZ
- It adds the execution_payload_available event stream

It also adds, as a stub without a trigger, the execution_payload_bid event stream.

These are the minimum necessary to make Dora happy on Kurtosis. Haven't reviewed this PR yet, so opening it as a draft. The only check was in Kurtosis against Dora.


---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-04 01:12:58 +00:00
james-prysm
0abf17f6cd refactor duties internal into core helper functions (#16402)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

This PR is a small component of https://github.com/OffchainLabs/prysm/pull/16377, with the goal of refactoring duties into separate endpoints for Gloas. This first step breaks out helper functions that will be useful and important in breaking out the APIs.

Did Kurtosis testing and basic benchmarks to make sure there's no bad regression.

---------

Co-authored-by: satushh <satushh@gmail.com>
2026-03-03 21:09:23 +00:00
Bastin
8307ee1098 Make committeeIndex parameter optional for GetAttestationData beacon API post Gloas (#16436)
After Gloas, the `committeeIndex` parameter of the beacon API `GetAttestationData` is optional.

[ethereum/beacon-APIs#578](https://github.com/ethereum/beacon-APIs/pull/578)
2026-03-03 20:00:07 +00:00
Bastin
7e4a039a7f Gloas API state structs (#16276)
**What does this PR do? Why is it needed?**
Adds Gloas API structs, and conversion functions
(`BeaconStateGloasFromConsensus()`)
2026-03-03 17:56:35 +00:00
Potuz
d46621eea3 Add Payload Envelope pending queue (#16414)
Adds a queue to keep pending payload envelopes whose block hasn't been seen yet. When we process the block, we send a call to process the corresponding payload. Elements are removed from the queue on finalization. Only one payload is kept per root.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 05:22:31 +00:00
Potuz
8b25ffaa45 Gloas/weights logs (#16456)
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-03 02:47:03 +00:00
Potuz
17f1b78494 Better usage of the Next Slot cache in Gloas (#16439)
This PR exposes a function from the blockchain package to obtain a given block's parent state.

It also fetches the correct state before processing slots to obtain the
prestate of a block.
2026-03-03 01:50:30 +00:00
terence
5874226067 Fix Gloas fork version using MetaDataV1 instead of MetaDataV2 (#16448)
Fix Gloas metadata version negotiation (MetaDataV1 → MetaDataV2)
2026-03-02 23:01:00 +00:00
Barnabas Busa
544bc3eb45 Fix nil pointer dereference in validator client with non-Prysm beacon-nodes (#16449)
1. When connecting to a REST-only beacon node (e.g. Teku, Lighthouse),
the gRPC connection provider is nil. The grpcClientManager was
unconditionally calling ConnectionCounter() on the provider in both
newGrpcClientManager and getClient, causing a SIGSEGV panic on startup.
2. runner.go — Made initial PushProposerSettings non-fatal. Lighthouse
returns 500: UnableToReadSlot on
/eth/v1/validator/prepare_beacon_proposer at genesis (slot 0) before any
blocks exist. The run loop already handles this as a warning — now the
initial call does too, and it retries on the next slot.


To repro the bug:
```yaml
participants:
  - el_type: geth
    el_image: ethpandaops/geth:master
    cl_type: prysm
    cl_image: ethpandaops/prysm-beacon-chain:develop
    vc_type: prysm
    vc_image: ethpandaops/prysm-validator:develop
    count: 1
  - el_type: geth
    el_image: ethpandaops/geth:master
    cl_type: teku
    cl_image: ethpandaops/teku:master
    vc_type: prysm
    vc_image: ethpandaops/prysm-validator:develop
    count: 1
additional_services:
  - dora
```

To repro the fix: 
```yaml
participants:
  - el_type: geth
    el_image: ethpandaops/geth:master
    cl_type: prysm
    cl_image: ethpandaops/prysm-beacon-chain:develop
    vc_type: prysm
    vc_image: ethpandaops/prysm-validator:develop
    count: 1
  - el_type: geth
    el_image: ethpandaops/geth:master
    cl_type: teku
    cl_image: ethpandaops/teku:master
    vc_type: prysm
    vc_image: ethpandaops/prysm-validator:barnabasbusa-bbusa-vc-fix
    count: 1
  - el_type: geth
    el_image: ethpandaops/geth:master
    cl_type: lighthouse
    cl_image: ethpandaops/lighthouse:unstable
    vc_type: prysm
    vc_image: ethpandaops/prysm-validator:barnabasbusa-bbusa-vc-fix
    count: 1


additional_services:
  - dora

```




---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-02 21:46:22 +00:00
Potuz
de6d9947d7 remove spurious addition from gitignore (#16455) 2026-03-02 19:32:50 +00:00
terence
e5382d95dd Add ExecutionPayloadEnvelopesByRange RPC handler for Gloas (#16444)
- Adds the P2P RPC method `execution_payload_envelopes_by_range/1` for
both server handler + client requester
- Server validates range, clamps to Gloas fork start, streams blinded
envelopes from DB with a placeholder unblinded payload (EL
reconstruction is a future TODO)
- Client validates response slots are monotonically increasing and
within the requested range
- Registers a dedicated rate limiter bucket (`envelopeCollector`) for
the new topic
2026-03-02 19:19:57 +00:00
satushh
6d8445d440 Implementation of ExecutionPayloadEnvelopesByRoot v1 (#16394)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

Implements the `ExecutionPayloadEnvelopesByRoot v1` RPC method as
specified in the
https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/p2p-interface.md#executionpayloadenvelopesbyroot-v1.

This allows peers to request signed execution payload envelopes by their
beacon block root.

This is a cache-only initial implementation.
TODO in follow-up work: DB storage.

2026-03-02 15:03:31 +00:00
Potuz
7b16c34af3 Fixed topic name for Gloas (#16443) 2026-03-01 00:29:21 +00:00
Potuz
91577760b6 better gloas logs (#16435)
Co-authored-by: terence <terence@prysmaticlabs.com>
2026-02-28 05:09:44 +00:00
terence
9ced048510 Add gloas beacon state marshal/unmarshal support (#16437) 2026-02-28 03:53:51 +00:00
Potuz
0439151373 Save head on gloas blocks (#16434)
Alternative to #16425
2026-02-27 22:41:34 +00:00
terence
5f050566f4 Update consensus node weight to return both empty and full (#16430)
This PR separates two different weight semantics that were previously
conflated in `Weight`
`Weight(root)` now consistently returns payload-node weight
`ConsensusNodeWeight(root)` is added for consensus-node weight (includes
empty + full payload effects in Gloas).
The "Head block is not the received block" log path now uses
`ConsensusNodeWeight`, so log output matches intended interpretation
2026-02-27 20:14:55 +00:00
terence
8a43513f5a Add receive payload to PublishExecutionPayloadEnvelope (#16429) 2026-02-27 19:32:21 +00:00
terence
6c8504ef71 Get parent root return parent root if parent is older than gloas (#16424)
This PR makes sure gloas parent pre‑state lookup falls back to the
parent root when the parent slot is pre‑fork.

Example:
- GloasForkEpoch = 1, 8 slots per epoch, and we process a block at slot
9 with parent at slot 8
- Slot 8 is still pre‑Gloas. The pre‑state for slot 9 must be fetched by
parent root, not by hash
2026-02-27 17:47:01 +00:00
terence
e99f86ecc5 Add gloas p2p support for gossip topics and scoring (#16423) 2026-02-27 15:33:43 +00:00
terence
0c36416677 Add generic signed beacon block gloas to factory (#16428) 2026-02-27 14:59:57 +00:00
terence
f2db0faea8 Fix use latest block hash for gloas parent hash (#16427)
Fix parent block hash lookup in the Gloas case: we should use the state's latest block hash instead of the normal Capella path.
2026-02-27 14:30:58 +00:00
Aarsh Shah
ecd3a910de fix linting 2026-02-27 11:42:26 +04:00
Aarsh Shah
1f5f3c879a finish tests 2026-02-27 11:33:02 +04:00
Jun Song
e31f59e04e Fix clang-format.yml workflow (#16412)
**What type of PR is this?**

> Bug fix

**What does this PR do? Why is it needed?**

CI - Protobuf Format actually always returns **0**, so the formatting check always becomes a no-op.

```
/entrypoint.sh: line 44: local: `proto/testing/test.proto': invalid variable name for name reference
```

This corresponds to the actual [source
code](27cc2adf6e/entrypoint.sh (L44)).
I think it's better to use `clang-format` natively rather than fixing
the upstream dependency.

This PR also formats `*.proto` files with LLVM style.

2026-02-27 01:04:51 +00:00
Potuz
83a5210549 Add helper to get canonical node information (#16419)
This adds a new helper to obtain head information from forkchoice. 

⚠️ It does not recompute Head as that is expensive. It uses the cached
head node from forkchoice.

An attester calls it by passing the attestation slot. It will return the right combination of the beacon block root and the payload content. The attester needs to set the committee index to 0 if false and 1 if true.

A proposer calls it by passing the current slot. It will tell it whether
to build on full or empty.
2026-02-26 20:34:22 +00:00
Preston Van Loon
0aa4a08b7b sync: remove unnecessary SignatureBatch.Copy() in batch verifier (#16398)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

This copy is unnecessary. There is no code path which mutates or would
mutate the signature set. Some local profiling indicates this would
reduce GC pressure somewhat. It was about 8.5% of the allocations on my
mainnet node.

2026-02-26 19:42:25 +00:00
Manu NALEPA
9c5d4a1767 Update c-kzg-4844 to v2.1.6 (#16417)
**What type of PR is this?**
Other

**What does this PR do? Why is it needed?**
Update c-kzg-4844 to v2.1.6

2026-02-26 19:20:58 +00:00
Preston Van Loon
dd1ede572d api: reduce gzip compression level (#16399)
**What type of PR is this?**

Other

**What does this PR do? Why is it needed?**

This is a small optimization on the GetValidators endpoint for the
beacon API. The default compression level was [level
6](https://cs.opensource.google/go/go/+/refs/tags/go1.26.0:src/compress/flate/deflate.go;l=591)
and best speed is level 1. When serving this endpoint, gzip was almost
40% of my CPU profile.

2026-02-26 18:57:05 +00:00
Potuz
ec7eb97d41 Refactor areDataColumnsAvailable to accept commitments directly (#16405)
Alternative to #16404

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 17:43:12 +00:00
Potuz
0c893bd1a6 Send PTC attestations from block to forkchoice (#16415)
Send PTC attestations from the block to forkchoice.
2026-02-26 16:14:46 +00:00
Potuz
9a7cae9e5c Adapt lateBlockTasks for Gloas fork (#16403)
Switch the late block ticker from SecondsPerSlot/3 to
SlotComponentDuration(AttestationDueBPS), and replace it with
AttestationDueBPSGloas at the Gloas fork epoch. In lateBlockTasks,
branch FCU calls so Gloas+ states use notifyForkchoiceUpdateGloas with
LatestBlockHash instead of the pre-Gloas notifyForkchoiceUpdate path.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 15:25:01 +00:00
terence
c114bc57d9 Implement gloas state transition and more spec tests (#16406) 2026-02-26 15:36:23 +00:00
Potuz
a9aea0ba25 Receive PTC attestation (#16413)
Wire up receipt of PTC attestations and send them to forkchoice.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 12:49:04 +00:00
Aarsh Shah
7d1a8892dd tests for handling validated cells 2026-02-26 16:30:35 +04:00
Aarsh Shah
c1275ac3de unit tests for the broadcaster 2026-02-26 16:05:24 +04:00
terence
9f2ade53ff Fix Gloas builders sweep when builders list is empty (#16411)
This PR fixes builders sweep withdrawals to no‑op when the builders list is empty, preventing out‑of‑range errors during Gloas transitions and aligning behavior with the spec's zero‑builders case.


### Spec reference
```
builders_limit = min(len(state.builders), MAX_BUILDERS_PER_WITHDRAWALS_SWEEP)
...
for _ in range(builders_limit):
    builder = state.builders[builder_index]
```
When `len(state.builders) == 0`, `builders_limit == 0`, so the loop does
not run and no indexing should occur.
2026-02-26 02:25:42 +00:00
terence
5b19916067 gloas: implement modified process withdrawals (#16310)
This PR implements [process withdrawals](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_withdrawals) for Gloas. In particular:

* `process_withdrawals` signature changed from the last fork, moving
from `(state, payload)` to `(state)`. It now adds builder pending
withdrawals, sweeping withdrawals, and index updates, and removes the
payload check. Because of these drastic changes, I think it is more
beneficial to implement gloas process withdrawals from scratch rather
than overloading the electra function

* previously, for every withdrawal, we would lock and unlock state to
deduct validator balance. In this fork, when a withdrawal is for a
builder, we also decrease the builder balance. I improved this by encap
the whole logic under a state setter
`st.DecreaseWithdrawalBalances(expected.Withdrawals)`, which only locks
and unlocks once

* what's new is the following (in code order), in `process_withdrawals`:

  * `IsParentBlockFull`
  * `ExpectedWithdrawalsGloas`
    * `appendBuilderWithdrawals`
    * `appendBuildersSweepWithdrawals`
  * `DecreaseWithdrawalBalances`
  * `SetPayloadExpectedWithdrawals`
  * `DequeueBuilderPendingWithdrawals`
  * `SetNextWithdrawalBuilderIndex`

* I passed withdrawals by reference as `*[]*enginev1.Withdrawal`. This results in two slice header copies of 24 bytes. I find the cost immaterial and the code more readable, but this can be changed if anyone has a stronger opinion
2026-02-25 18:10:15 +00:00
Potuz
e4cbb34c2f Gloas/forkchoice ptc (#16392)
This PR applies proposer boost as per the Gloas spec and deals with decisions between full and empty children by taking the PTC and DA vote into account. It **does not** yet deal with the new equivocating conditions to avoid applying proposer boost in those cases.

Tests are still missing; opening for review early. The bulk of the Gloas forkchoice unit tests will come to this PR later.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-25 17:32:34 +00:00
Potuz
c9a2a0dd86 Gloas Changes to ReceiveBlock (#16396)
This PR removes any execution traces from ReceiveBlock after Gloas. It
sets daWaitTime to zero and changes the logs to report on the important
fields in the bid.

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2026-02-24 20:43:38 +00:00
james-prysm
fdd04a5466 gloas grpc proposer (#16336)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

This PR defines the validator proposer paths for Gloas, namely adding
gloas fork support on the validator client for propose function, and
adding the following gRPC endpoint support: get Gloas Block, publish
Gloas Block, Get Payload Envelope, Publish Payload envelope.

We propose in this order:

1. Get block (cache payload)
2. Submit block
3. Get payload (get post state of 2 via chain info getter) // needs the block to be received and processed
4. Submit payload

There are several things still missing in the PR, depending on https://github.com/OffchainLabs/prysm/pull/15656 as well as future ones; for now I have left those areas as TODO remarks.

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2026-02-24 19:16:45 +00:00
Potuz
a199580672 Receive payload envelope (#16386)
This PR wires execution payload envelope processing on the blockchain
package for Gloas.

It **does not** deal with DA yet.

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-24 16:56:32 +00:00
Potuz
b5bdd65f64 Remove head state from new head event (#16388)
The headstate parameter is not used.
2026-02-23 19:06:55 +00:00
Potuz
ce4e653261 Handle Gloas pre-state fetching (#16382)
- a new getter to forkchoice that returns the block hash for the given
blockroot
- a new blockchain getter getLookupParentRoot that returns the root that serves as the key for stategen to fetch the parent state; pre-Gloas it is not modified and always returns the blockroot
- Changes the signature of getBlockPreState to receive a ROBlock
2026-02-23 18:11:47 +00:00
Potuz
6f87a0cb95 Remove num active validators (#16390)
Remove unused field in forkchoice

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-23 18:03:55 +00:00
terence
d262c9f927 Implement upgrade gloas (#16378)
This PR implements the Gloas fork upgrade. There are two main parts:
- The first part upgrades the Fulu state to Gloas, including the new Gloas fields
- The second part onboards the builder at the fork, which is a state setter under one block. For this, we had to refactor a few things to be lock-free
- We also added spec tests for it to pass
2026-02-23 17:44:29 +00:00
Potuz
7865bd5f32 Gloas/forkchoice gloas weights (#16357)
This PR changes attestation weight counting so that it is compatible with Gloas.

Same slot attestations are added to the pending node, otherwise
attestations are added to the full payload status node. This PR also
deals with updating the balances and weight transfer. Notice that due to
our current limitations on how we account for weights, we will mimic the
spec status as if https://github.com/ethereum/consensus-specs/pull/4918
were merged. Counting for these attestations would be quite difficult
otherwise in Prysm.
2026-02-23 16:33:21 +00:00
terence
7f2593e496 e2e: wait for all nodes to reach mid-epoch before head compare (#16379)
`AllNodesHaveSameHead` only waits for mid‑epoch on node 0. In
`testCheckpointSync`, the checkpoint‑sync node can still be finishing
syncing while node 0 advances, so the evaluator compares heads across
different epochs and flakes. We should wait for all nodes to reach the
same point before comparing head epochs and roots.

I also added better handling for context timeout per feedback from
@Inspector-Butters
2026-02-23 15:12:01 +00:00
Aarsh Shah
c15ffb4469 Rename metrics for partial cells to confirm with ETH beacon metrics (#16369)
This PR renames some metrics for partial cells to conform with the ETH beacon metrics at https://github.com/ethereum/beacon-metrics/pull/21.
2026-02-23 15:00:21 +04:00
Aarsh Shah
cdfc2ba3cd update correctly 2026-02-23 09:38:48 +04:00
Aarsh Shah
1cd3418dd7 send parts metadata along with header 2026-02-23 09:34:49 +04:00
Aarsh Shah
8b295c6208 Update beacon-chain/p2p/partialdatacolumnbroadcaster/partial.go
Co-authored-by: Marco Munizaga <git@marcopolo.io>
2026-02-23 09:11:15 +04:00
satushh
55e2f0a7d2 Ptc duty endpoint /eth/v1/validator/duties/ptc/{epoch} (#16326)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

Implement the endpoint `/eth/v1/validator/duties/ptc/{epoch}` for gloas.
Relevant link:
https://github.com/ethereum/beacon-APIs/pull/552/files#diff-ea5c1a400bd02986042f033c8d62441c11077868b0a325c6e24297875fd35c38

2026-02-21 03:58:34 +00:00
Aarsh Shah
09c123294f remove logrus adaptor from the partial cells PR 2026-02-20 11:31:04 +04:00
Aarsh Shah
6710215816 Merge branch 'feat/process-eager-partial-header' into feat/update-gossipsub-update-new-partial-API 2026-02-20 10:37:24 +04:00
Aarsh Shah
b8e0f8bf79 drop header if no semaphore bound is available to handle it 2026-02-20 10:30:42 +04:00
Aarsh Shah
7648f2855d send parts metadata correctly 2026-02-19 20:06:58 +04:00
Potuz
bf11ac0d64 Only call FCU pre-Gloas (#16373)
Also this PR removes the unnecessary pointer parameter fcuArgs since it
is no longer used by the caller.

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2026-02-19 15:47:21 +00:00
Aarsh Shah
92c24014a2 don't unmarshal message twice 2026-02-19 18:09:42 +04:00
Aarsh Shah
eda07c0198 changes as per review 2026-02-19 17:53:33 +04:00
Aarsh Shah
97a5b50c0b process incoming RPC correctly 2026-02-19 14:48:29 +04:00
Potuz
dfb918432f Gloas/forkchoice payload envelope (#16351)
Adds a method to insert full nodes after Gloas. Requires #16338
2026-02-19 10:10:55 +00:00
Aarsh Shah
76a68759c5 update recieved state on eager push for idempotency 2026-02-19 12:15:32 +04:00
Aarsh Shah
88834f044f remove excessive checks 2026-02-19 11:48:22 +04:00
Aarsh Shah
115a91d59b let caller format the error for the statekind 2026-02-19 11:37:22 +04:00
Aarsh Shah
fc3d41f90b Apply suggestions from code review
Co-authored-by: Marco Munizaga <git@marcopolo.io>
2026-02-19 11:30:05 +04:00
Aarsh Shah
d2ae2a4fa4 default to nothing available and nothing requested if peer state is empty 2026-02-19 11:29:15 +04:00
Aarsh Shah
7719686a41 root validation changes 2026-02-19 10:41:03 +04:00
Aarsh Shah
57a295f1a5 Avoid string allocation in map keys 2026-02-19 09:25:21 +04:00
terence
a28c6c8145 Add gloas block gossip changes (#16368)
This updates beacon-block gossip validation for Gloas to use the signed
execution payload bid instead of the execution payload. It removes
execution‑payload‑based checks and introduces bid‑based checks, plus
stubs for parent‑payload checks pending blockchain package support.

Reference:
https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/p2p-interface.md#beacon_block

Fixes https://github.com/OffchainLabs/prysm/issues/16372
2026-02-18 20:14:21 +00:00
Aarsh Shah
a0630aec02 final changes 2026-02-18 15:18:02 +04:00
Bastin
045a3cfe4e GetVersionV2 endpoint (#16347)
**What does this PR do? Why is it needed?**
This PR adds the implementation for `GetVersionV2` based on
[ethereum/beacon-APIs#568](https://github.com/ethereum/beacon-APIs/pull/568).

Q: is there anything to be done for deprecating `GetVersionV1`?
2026-02-18 10:02:06 +00:00
Aarsh Shah
9c0fc75cd2 Merge branch 'feat/process-eager-partial-header' into feat/update-gossipsub-update-new-partial-API 2026-02-18 13:17:39 +04:00
Aarsh Shah
0f3f3124af final changes 2026-02-18 13:07:34 +04:00
terence
8ee28394ab Add Gloas attestation committee index validation (#16359)
Adds Gloas fork attestation validation rules for gossip processing. This
implements the new committee index validation requirements introduced in
the Gloas fork.

## Changes
- Uses attestation epoch to determine if Gloas rules apply
- **Committee index validation**: 
  - Committee index must be < 2 (0 or 1 only)
- Same-slot attestations (where `attestation.data.slot == block.slot`)
must use committee index 0
  - Different-slot attestations can use either committee index 0 or 1
2026-02-17 18:00:32 +00:00
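A minimal sketch of the two rules above; the function name, argument types, and error messages are illustrative rather than Prysm's actual gossip validation code.

```go
package main

import (
	"errors"
	"fmt"
)

// validateGloasCommitteeIndex applies the Gloas gossip rules: the
// committee index must be 0 or 1, and an attestation for the block's own
// slot must use index 0.
func validateGloasCommitteeIndex(committeeIndex, attSlot, blockSlot uint64) error {
	if committeeIndex >= 2 {
		return errors.New("gloas: committee index must be 0 or 1")
	}
	if attSlot == blockSlot && committeeIndex != 0 {
		return errors.New("gloas: same-slot attestations must use committee index 0")
	}
	return nil
}

func main() {
	fmt.Println(validateGloasCommitteeIndex(1, 10, 10)) // same slot, index 1: rejected
	fmt.Println(validateGloasCommitteeIndex(1, 9, 10))  // different slot, index 1: ok
}
```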
Aarsh Shah
412648ac02 add changelog 2026-02-17 19:48:55 +04:00
Aarsh Shah
fd3587c932 get a green CI 2026-02-17 18:53:23 +04:00
Aarsh Shah
6ab9af51d6 fix bazel 2026-02-17 17:27:54 +04:00
Aarsh Shah
73c5d2648c Tests for partial data column 2026-02-17 17:15:51 +04:00
Aarsh Shah
9b315166de implement OnIncomingRPC 2026-02-17 16:29:00 +04:00
kasey
b31e2ffe51 avoid copying the finalized state when computing cgc (#16355)
Reviewing some (unrelated) sync code today I noticed that we are using a
stategen accessor for the finalized state which copies the entire state
object to look up validator balances to compute the custody_group_count.
This excess memory allocation is likely causing GC pressure and
increasing memory utilization.

This PR avoids state copying for this purpose by making the following
changes:
- Adds a new method to the `ReadOnlyBalances` state interface:
`EffectiveBalances([]primitives.ValidatorIndex) (uint64, []uint64,
error)`. This method returns the sum of the effective balances
of the given list of validator indices, a list with the individual
effective balance of each requested index (where the i-th element in the
return corresponds to the i-th element of the parameter), and an error,
which is necessary due to index bounds checks and quirks of the
multi-value slice that can apparently leave the state unusable for such
lookups if not correctly initialized.
- Adds a new method to the stategen interface
`FinalizedReadOnlyBalances`, which returns the finalized state asserted
to the `ReadOnlyBalances` interface.
- Switches the peerdas code to use the sum given by `EffectiveBalances`.

There was some existing nil checking code in the peerdas package that I
didn't want to modify, so I added a new compound interface in stategen
to allow the returned state to also expose the `IsNil` method.

fixes https://github.com/OffchainLabs/prysm/issues/16354

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2026-02-17 08:50:58 +00:00
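An illustrative, self-contained version of the EffectiveBalances contract described above, over a plain slice instead of Prysm's state object; the real method lives on the `ReadOnlyBalances` interface and must also cope with the multi-value-slice initialization quirks mentioned in the commit message.

```go
package main

import "fmt"

// effectiveBalances returns the sum of the requested validators'
// effective balances plus each individual balance, where the i-th result
// corresponds to the i-th input index.
func effectiveBalances(all []uint64, indices []uint64) (uint64, []uint64, error) {
	balances := make([]uint64, 0, len(indices))
	var sum uint64
	for _, idx := range indices {
		if idx >= uint64(len(all)) {
			return 0, nil, fmt.Errorf("validator index %d out of range", idx)
		}
		sum += all[idx]
		balances = append(balances, all[idx])
	}
	return sum, balances, nil
}

func main() {
	sum, each, err := effectiveBalances([]uint64{32, 32, 2048}, []uint64{0, 2})
	fmt.Println(sum, each, err) // 2080 [32 2048] <nil>
}
```

Because the caller only needs balances, no other part of the state has to be copied, which is the allocation the commit eliminates.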
Aarsh Shah
938dda3025 handle gossip for peer 2026-02-17 09:36:21 +04:00
Aarsh Shah
55f5973dad for peer implementation 2026-02-16 20:16:12 +04:00
Aarsh Shah
867978e79b update gossipsub 2026-02-16 16:13:37 +04:00
Aarsh Shah
c8af4b74b5 rename to getBlobsCalled 2026-02-16 15:03:15 +04:00
Aarsh Shah
ab9366ed5f event loop should never block 2026-02-16 14:27:09 +04:00
terence
22e77add54 Refactor function ProcessExecutionPayload with ApplyExecutionPayload (#16356)
This PR refactors `ProcessExecutionPayload` with `ApplyExecutionPayload`
so the caller can use the Apply method to calculate the post-state root.
Note that validations are not required for the Apply function; all we
really need are the state mutation lines:
```
1. Ensure latest_block_header.state_root is set (if zero, set it to the pre‑payload HashTreeRoot)...
2. processExecutionRequests()
3. QueueBuilderPayment()
4. SetExecutionPayloadAvailability(state.Slot(), true)
5. SetLatestBlockHash(payload.BlockHash())
```
I decided to keep them there because (a) it's cheap, (b) it makes the
refactor cleaner to reason about, and (c) the API/caller may want to
validate envelope and bid consistency (ex: the beacon API has an option
to validate consensus). A rough sketch of the Apply shape follows.
2026-02-13 15:51:22 +00:00
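A minimal sketch of that split, with placeholder types: only the five mutation steps are taken from the commit message above, steps 2-3 are elided as comments, and none of this is Prysm's actual state API.

```go
package main

import "fmt"

// state is a stand-in for the beacon state; only the fields touched by
// the mutation steps are modeled.
type state struct {
	headerStateRoot  [32]byte
	slot             uint64
	latestBlockHash  [32]byte
	payloadAvailable map[uint64]bool
}

func (s *state) hashTreeRoot() [32]byte { return [32]byte{0xaa} } // stub

// applyExecutionPayload performs only the state mutations; validation is
// left to Process (or to an API caller that wants to check envelope/bid
// consistency).
func applyExecutionPayload(s *state, blockHash [32]byte) {
	// 1. Backfill latest_block_header.state_root if it is still zero.
	if s.headerStateRoot == ([32]byte{}) {
		s.headerStateRoot = s.hashTreeRoot()
	}
	// 2. processExecutionRequests() and 3. QueueBuilderPayment() elided.
	// 4. Mark the payload available for this slot.
	s.payloadAvailable[s.slot] = true
	// 5. Record the latest execution block hash.
	s.latestBlockHash = blockHash
}

func main() {
	s := &state{slot: 7, payloadAvailable: map[uint64]bool{}}
	applyExecutionPayload(s, [32]byte{0x01})
	fmt.Println(s.payloadAvailable[7], s.latestBlockHash[0]) // true 1
}
```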
terence
7f9983386e gloas: add new execution payload envelope processing (#15656)
This PR implements
[process_execution_payload](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#new-process_execution_payload)
and spec tests.
2026-02-12 21:56:41 +00:00
Potuz
935acf6b9d Gloas/forkchoice (#16338)
Gloas Initial forkchoice implementation

This is the bare minimum needed to make the **current fork** logic
compatible with having full/empty slots in forkchoice. That is,
forkchoice is designed as if we had the option to have full/empty slots,
but we still create the full node when we import a block pre-gloas.

No PTC and no ordering between empty/full is implemented yet; it just
always chooses the full child of the two.

The *Node structure corresponds to the `PAYLOAD_PENDING_STATUS` in the
spec, while the enclosing *PayloadNode structures are the full
forkchoice nodes that have either full or empty status.

The most complicated path to check in this PR is the working of
`SetOptimisticToInvalid`, which handles invalid block insertion from the
EL. There are many cases: the passed root may or may not exist in
forkchoice, and the invalid node may have been built on top of a full or
an empty node, which is particularly ugly because if the node is not in
forkchoice we still need to find out whether its parent was full or
empty.

- [x] compiling beacon chain
- [x] fix tests
2026-02-12 19:39:56 +00:00
terence
63b69a63b1 Add gossip for payload envelope (#16349)
This PR implements gossip for execution payload envelope as outlined in
the following spec:

https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/p2p-interface.md#execution_payload
2026-02-12 16:52:13 +00:00
satushh
cf63d112be Graffiti implementation (#16089)

**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

This PR implements graffiti as described in the corresponding spec doc
`graffiti-proposal-brief.md `

**Which issues(s) does this PR fix?**

- https://github.com/OffchainLabs/prysm/issues/13558

**Other notes for review**

**Acknowledgements**

- [ ] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for
reviewers to understand this PR.
2026-02-12 12:35:15 +00:00
terence
6c045083a6 gloas: add modified attestation processing (#15736)
This PR implements
[process_attestation](https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#modified-process_attestation)
alongside spec tests

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2026-02-11 23:22:42 +00:00
james-prysm
09d0338aa9 adding db functions for saving gloas block and payload (#16301)

**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

Gloas doesn't have the concept of a blinded block anymore, so we save
the full gloas block instead. That said, the full block does not contain
the payload envelope, so there are separate functions for saving those.
This PR introduces these types and functions; the payload envelope
doesn't actually get saved yet in this PR.

A TODO comment is added for pruning as well.

References the epbs branch.

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-02-11 17:59:25 +00:00
Aarsh Shah
fd07e59c0a bound number of go-routines to handle header 2026-02-11 12:26:49 +04:00
Aarsh Shah
d66d25e7ad fix lint 2026-02-11 11:49:25 +04:00
Aarsh Shah
89ab513183 don't block the event loop on handling headers 2026-02-11 11:43:20 +04:00
Aarsh Shah
1c6110b6e8 move initialisation of validators and handlers for partial data columns to the Start method 2026-02-11 10:37:06 +04:00
james-prysm
eaf3aa3e8e simplify get and post block parsing for REST (#16307)
**What type of PR is this?**

 Feature


**What does this PR do? Why is it needed?**

This PR attempts to remove code duplication by processing the get and
post block APIs through a map of configurations. This should simplify
maintenance.

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-02-10 18:28:55 +00:00
terence
9c49bb484c Add gossip for payload attestation (#16333)
This PR implements gossip validation and subscription for payload
attestation
2026-02-10 16:17:22 +00:00
Aarsh Shah
a82edc7fbc fix lint 2026-02-10 12:11:14 +04:00
Aarsh Shah
39bb8d2929 Merge branch 'rebased-partial-columns' into feat/process-eager-partial-header 2026-02-10 12:10:14 +04:00
Marco Munizaga
80e0227bac Partial columns addendum (#16327)
see commits for details. best reviewed commit by commit.
2026-02-10 12:07:27 +04:00
Aarsh Shah
387cfcd442 only complete column if it has been extended 2026-02-10 11:50:11 +04:00
Aarsh Shah
c70e51b445 fix test 2026-02-10 10:12:18 +04:00
Aarsh Shah
a1a8a86341 extend existing cells 2026-02-10 09:41:55 +04:00
terence
bb0f70ad60 gloas: add read only wrapper for payload envelope (#16339)
This PR adds read only wrapper for execution payload envelope
2026-02-09 17:23:12 +00:00
Aarsh Shah
71b1610331 publish headers 2026-02-09 20:37:12 +04:00
Aarsh Shah
814abab4b5 wait for header to be processed 2026-02-09 18:47:47 +04:00
satushh
dc66f8872d Close libp2p host (#16313)
**What type of PR is this?**

 Other

**What does this PR do? Why is it needed?**

Close host to save resource

**Which issues(s) does this PR fix?**

Fixes #

**Other notes for review**

**Acknowledgements**

- [ ] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>
2026-02-09 14:40:07 +00:00
Preston Van Loon
db2bb5505c db: Copy byte slices that live outside of the view transaction (#16332)
**What type of PR is this?**

Bug fix

**What does this PR do? Why is it needed?**

The bbolt documentation suggests that byte slices obtained during a
View transaction should not be used outside of the transaction, and
that mutating those slices could break things.

**Which issues(s) does this PR fix?**

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-02-07 14:10:07 +00:00
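For context, a small self-contained illustration of the bbolt rule this fix addresses: slices returned inside a View transaction are only valid for the life of the transaction, so they must be copied before escaping it. The bucket and key names here are arbitrary.

```go
package main

import (
	"bytes"
	"fmt"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var value []byte
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("blocks"))
		if b == nil {
			return nil // bucket not created yet
		}
		// Get returns memory that is only valid inside this transaction;
		// clone it before letting it escape.
		value = bytes.Clone(b.Get([]byte("head")))
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied value: %x\n", value)
}
```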
terence
14f01bbc6c gloas: move kzg commitments to bid (#16309)
This PR moves kzg commitments to the bid. The rationale behind it is
captured in this [issue](https://github.com/ethereum/consensus-specs/issues/4870)
* Moves blob KZG commitments to the earlier point where builder intent
is known
* Removes duplicated commitments from data column sidecars, which saves
decent bandwidth per slot
* Enables nodes to start fetching blobs via getBlobs as soon as the bid
is received
* Slightly increases bid size and may add minor bidding channel latency
but the tradeoff favors lower network load and simpler DA handling
2026-02-06 23:07:54 +00:00
Potuz
c3e74e4a5d Remove unused method in forkchoice (#16337) 2026-02-06 19:21:58 +00:00
Potuz
e7ae6a004b Remove unused method in forkchoice (#16331) 2026-02-06 15:30:50 +00:00
Aarsh Shah
d0d6bab8a0 handle header 2026-02-06 19:21:43 +04:00
Aarsh Shah
3f6c01fc3b changes as per review 2026-02-06 18:44:58 +04:00
Bastin
862fb2eb4a Fix gen-logs.sh - gitignore bug (#16328)
**What does this PR do? Why is it needed?**
`gen-logs.sh` was skipping `cmd/beacon-chain/execution/` due to a rule
in `.gitignore`.
Added a fix so that `gen-logs.sh` deliberately ignores `.gitignore`
entries.
2026-02-05 14:06:42 +00:00
Potuz
bb80a9c832 Remove unused map in forkchoice (#16329)
nodeByPayload was not being used.
2026-02-05 13:25:06 +00:00
Bastin
c1b668a50a Fix logging issue (#16322)
**What does this PR do? Why is it needed?**
This PR, in an attempt to fix the logging issue described in #16314,
does the following:
- Adds a new field `Identifier` to the `WriterHook` struct, and filters
out log entries that have the key `log_target` and the value of the
hook's `Identifier`. For now the identifiers are `ephemeral` and `user`,
differentiating between the user facing terminal/log file, and the
debugger facing ephemeral log file.
- Stores the value of the `--verbosity` and `--log.vmodule` flags in
`io/logs`, so it can be accessed by packages that need to know the
verbosity they're logging with. (note that since #16272 each package can
have a different verbosity, so verbosity is now defined per package
instead of globally)
- Improves the calculation of the global logging level by ignoring the
`ephemeralLogFileVerbosity` when the `--disable-ephemeral-log-file` flag
is enabled.
- Uses this added logic to fix the problem in
`logStateTransitionData()` (described in #16314)

Note: since we're saving this new data in `io/logs`, we should refactor
`prefixFormatter` to read the data from here, but that currently creates
a circular import error. I will try to fix this and refactor the
formatter in a future PR.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2026-02-05 10:24:50 +00:00
Justin Traglia
fab687d96d Improve ethspecify integration (#16304)
**What type of PR is this?**

Documentation

**What does this PR do? Why is it needed?**

* Move the ethspecify config from `/specrefs/.ethspecify` to
`/.ethspecify`.
* This allows developers to use inline specrefs (eg spec functions in
godoc comments).
* To do this, simply add a spec tag and run `ethspecify` to populate it.
* Clean up specref exceptions; organize by upgrade & put items in the
correct section.
* Update a few godoc comments to use the new inline specref feature.
* Update check-specrefs GitHub action so that it enforces up-to-date
godocs.
* Standardize specref naming; requiring a `#fork` tag for everything.
* Add new specrefs (which haven't been implemented yet) which were
missing.

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2026-02-04 18:44:01 +00:00
james-prysm
cf94ccbf72 node fallback cleanup (#16316)
**What type of PR is this?**

 Other

**What does this PR do? Why is it needed?**

Follow-up to https://github.com/OffchainLabs/prysm/pull/16215: this PR
improves logging, fixes stuttering in package naming, adds additional
unit tests, and deduplicates fallback node code.

**Which issues(s) does this PR fix?**

Fixes a potential race when reconnecting very quickly to the same host
while it still has a stale connection.

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-02-04 15:59:42 +00:00
Aarsh Shah
c5914ea4d9 handle partial data column header 2026-02-04 19:05:03 +04:00
Aarsh Shah
08d143bd2c add type for partial header reconstruction 2026-02-04 18:27:54 +04:00
Aarsh Shah
b36bc1fe17 Merge branch 'develop' into rebased-partial-columns 2026-02-04 10:29:56 +04:00
Aarsh Shah
a329a77037 fix partial columns broadcast (#16321)
Fixes the data column broadcast logic for partial and full columns.
2026-02-04 10:25:53 +04:00
Aarsh Shah
75895c1e0b fix: Set Beacon Node Options after reading the config file (#16320)
**What type of PR is this?**
Bug fix

**What does this PR do? Why is it needed?**
This PR ensures that we set the beacon node options AFTER reading the
config file (if one is given to override the defaults).

**Which issues(s) does this PR fix?**
It fixes the issue that Barnabas reported around the
"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS" override not being
respected (and potentially other issues resulting from setting the
options before reading the config).

Fixes #

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-02-03 16:51:01 +00:00
Aarsh Shah
bfd9ff8651 fix lint 2026-02-03 18:10:35 +04:00
Aarsh Shah
79301d4db6 fix bazel 2026-02-03 18:03:26 +04:00
Aarsh Shah
fbfea6f753 fix log files CI error 2026-02-03 17:54:41 +04:00
Aarsh Shah
562ef25527 fix CI 2026-02-03 17:47:40 +04:00
Aarsh Shah
488971f989 fix CI 2026-02-03 17:44:33 +04:00
Aarsh Shah
b129eaaeb8 fix lint 2026-02-03 17:01:45 +04:00
Aarsh Shah
90302adbd2 fix CI 2026-02-03 16:17:36 +04:00
Aarsh Shah
a6262ba07b go mod tidy and partial columns 2026-02-03 15:23:59 +04:00
Aarsh Shah
7d5c8d6964 Merge branch 'develop' into rebased-partial-columns 2026-02-03 10:20:18 +04:00
Preston Van Loon
d1b9281677 golangci-lint: Remove test exclusion from formatting (#16318)
**What type of PR is this?**

> Other

**What does this PR do? Why is it needed?**

**Which issues(s) does this PR fix?**

Follow up to #16311

**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-02-02 17:42:05 +00:00
Marco Munizaga
38cae3f8de add todos from call
this came from a call with Aarsh and Kasey
2026-02-02 11:05:40 -06:00
Marco Munizaga
a5e6d0a3ac Implement gossipsub_mesh_peers metric 2026-02-02 11:05:40 -06:00
Marco Munizaga
c4a308e598 Track number of peers in mesh
along with tracking which peers request partial messages
2026-02-02 11:05:40 -06:00
Marco Munizaga
ba720c1b4b update go-libp2p-pubsub with new tracer
the new tracer interface provides the peer ID in tracer.RecvRPC.
2026-02-02 11:05:40 -06:00
Marco Munizaga
5a3f45f91f add todo 2026-02-02 11:05:40 -06:00
Marco Munizaga
29010edcd1 return valid if there are no datacolumns to validate 2026-02-02 11:05:40 -06:00
Marco Munizaga
045c29ccce cache partial data column header by group ID 2026-02-02 11:05:40 -06:00
Marco Munizaga
0d80bbe44f Add partial message metrics 2026-02-02 11:05:40 -06:00
Marco Munizaga
14f13ed902 more context around errors 2026-02-02 11:05:40 -06:00
Marco Munizaga
308199c2e7 fix test typo 2026-02-02 11:05:40 -06:00
Marco Munizaga
7a04c6b645 eagerly push the partial data column header 2026-02-02 11:05:40 -06:00
Marco Munizaga
93cda45e18 Include the version byte in the group ID 2026-02-02 11:05:40 -06:00
Marco Munizaga
1b4265ef3f add partial data column header 2026-02-02 11:05:40 -06:00
Marco Munizaga
90d1716fd7 Update go-libp2p-pubsub 2026-02-02 11:05:40 -06:00
Marco Munizaga
c4f1a9ac4f add todo 2026-02-02 11:05:40 -06:00
Marco Munizaga
73d948a710 add partial-data-columns flag 2026-02-02 11:05:38 -06:00
Marco Munizaga
5e2985d36b beacon-chain/sync: subscribe to partial columns 2026-02-02 11:04:53 -06:00
Marco Munizaga
7ac3f3cb68 publish partial columns when proposing a block 2026-02-02 11:04:53 -06:00
Marco Munizaga
0a759c3d15 beacon-chain/execution: return partial columns and use getBlobsV3
... if available
2026-02-02 11:04:53 -06:00
Marco Munizaga
bf7ca00780 core/peerdas: Add PartialColumns helper 2026-02-02 11:04:52 -06:00
Marco Munizaga
efcb98bcaa core/peerdas: support partial responses 2026-02-02 11:04:52 -06:00
Marco Munizaga
7b0de5ad0e beacon-chain/p2p: own and start PartialColumnBroadcaster 2026-02-02 11:04:52 -06:00
Marco Munizaga
2e15cd2068 Add metrics for gossipsub message sizes 2026-02-02 11:04:52 -06:00
Marco Munizaga
2e5192e496 Implement PartialColumnBroadcaster 2026-02-02 11:04:52 -06:00
Marco Munizaga
729c54a300 refactor DataColumn Cell KZG Proof verification 2026-02-02 11:04:52 -06:00
Marco Munizaga
4556aa266a avoid needless copy 2026-02-02 11:04:52 -06:00
Marco Munizaga
2f1cac217d Add PartialDataColumn type 2026-02-02 11:04:52 -06:00
Marco Munizaga
6bfc779ea1 proto: Add PartialDataColumnSidecar 2026-02-02 11:04:52 -06:00
Marco Munizaga
90481d6aa8 clone slice in testing util 2026-02-02 11:04:52 -06:00
Marco Munizaga
9f64007dc1 logrusadapter for slog 2026-02-02 11:04:52 -06:00
Marco Munizaga
879ea624ec fix multiaddr comparison 2026-02-02 11:04:52 -06:00
Marco Munizaga
79841f451c deps: update libp2p deps
for partial message support and simnet support
2026-02-02 11:04:52 -06:00
james-prysm
641d90990d grpc fallback improvements (#16215)

**What type of PR is this?**

## Summary

This PR implements gRPC fallback support for the validator client,
allowing it to automatically switch between multiple beacon node
endpoints when the primary node becomes unavailable or unhealthy.

## Changes

- Added `grpcConnectionProvider` to manage multiple gRPC connections
with circular failover
- Validator automatically detects unhealthy beacon nodes and switches to
the next available endpoint
- Health checks verify both node responsiveness AND sync status before
accepting a node
- Improved logging to only show "Found fully synced beacon node" when an
actual switch occurs (reduces log noise)


I removed the old middleware that uses gRPC's built-in load balancer
because:

- gRPC's pick_first load balancer doesn't provide sync-status-aware
failover
- The validator needs to ensure it connects to a fully synced node, not
just a reachable one

## Test Scenario

### Setup
Deployed a 4-node Kurtosis testnet with local validator connecting to 2
beacon nodes:

```yaml
# kurtosis-grpc-fallback-test.yaml
participants:
  - el_type: nethermind
    cl_type: prysm
    validator_count: 128  # Keeps chain advancing
  - el_type: nethermind
    cl_type: prysm
    validator_count: 64
  - el_type: nethermind
    cl_type: prysm
    validator_count: 64   # Keeps chain advancing
  - el_type: nethermind
    cl_type: prysm
    validator_count: 64   # Keeps chain advancing

network_params:
  fulu_fork_epoch: 0
  seconds_per_slot: 6
```

Local validator started with:
```bash
./validator --beacon-rpc-provider=127.0.0.1:33005,127.0.0.1:33012 ...
```

### Test 1: Primary Failover (cl-1 → cl-2)

1. Stopped cl-1 beacon node
2. Validator detected failure and switched to cl-2

**Logs:**
```
WARN  Beacon node is not responding, switching host currentHost=127.0.0.1:33005 nextHost=127.0.0.1:33012
DEBUG Trying gRPC endpoint newHost=127.0.0.1:33012 previousHost=127.0.0.1:33005
INFO  Failover succeeded: connected to healthy beacon node failedAttempts=[127.0.0.1:33005] newHost=127.0.0.1:33012 previousHost=127.0.0.1:33005
```

**Result:**  PASSED - Validator continued submitting attestations on
cl-2

### Test 2: Circular Failover (cl-2 → cl-1)

1. Restarted cl-1, stopped cl-2
2. Validator detected failure and switched back to cl-1

**Logs:**
```
WARN  Beacon node is not responding, switching host currentHost=127.0.0.1:33012 nextHost=127.0.0.1:33005
DEBUG Trying gRPC endpoint newHost=127.0.0.1:33005 previousHost=127.0.0.1:33012
INFO  Failover succeeded: connected to healthy beacon node failedAttempts=[127.0.0.1:33012] newHost=127.0.0.1:33005 previousHost=127.0.0.1:33012
```

**Result:**  PASSED - Circular fallback works correctly

## Key Log Messages

| Log Level | Message | Source |
|-----------|---------|--------|
| WARN | "Beacon node is not responding, switching host" | `changeHost()` in validator.go |
| INFO | "Switched gRPC endpoint" | `SetHost()` in grpc_connection_provider.go |
| INFO | "Found fully synced beacon node" | `FindHealthyHost()` in validator.go (only on actual switch) |

## Test Plan

- [x] Verify primary failover (cl-1 → cl-2)
- [x] Verify circular failover (cl-2 → cl-1)
- [x] Verify validator continues producing attestations after switch
- [x] Verify "Found fully synced beacon node" only logs on actual switch
(not every health check)

**What does this PR do? Why is it needed?**

**Which issues(s) does this PR fix?**

Fixes https://github.com/OffchainLabs/prysm/pull/7133


**Other notes for review**

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2026-02-02 14:51:56 +00:00
terence
d2fc250f34 Run go fmt (#16311)
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2026-02-02 14:19:15 +00:00
Jun Song
571c6f39aa Add docs for SSZ Query package (#16299)
**What type of PR is this?**

Documentation

**What does this PR do? Why is it needed?**

Although godoc and comments are well-written in the `encoding/ssz/query`
package, we (@rkapka, @fernantho, @syjn99)
[agreed](https://discord.com/channels/476244492043812875/1387734369527136297/1466075406523174944)
that it would be great to have human-readable documentation.

**Which issues(s) does this PR fix?**

Part of  #15587 & #15598 

**Other notes for review**

This documentation was first drafted by Claude Code and then went
through a few rounds of self-review.

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [ ] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).

---------

Co-authored-by: fernantho <fernantho1@gmail.com>
Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>
2026-02-01 03:39:53 +00:00
Justin Traglia
55fe85c887 Add ability to download nightly tests from a specific night (#16298)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

This PR allows devs to test against a specific run of the nightly
reference test generator.

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-01-29 21:38:13 +00:00
Justin Traglia
31f77567dd Add a README for specrefs (#16302)
**What type of PR is this?**

Documentation

**What does this PR do? Why is it needed?**

This PR adds a basic README for the specrefs.


**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-01-29 20:36:29 +00:00
terence
a7fdd11777 gloas: sample PTC per committee (#16293)
This PR updates `get_ptc` construction to sample the PTC
committee-by-committee instead of concatenating all beacon committees
into a large slice. There are no functional changes to payload
attestation verification. An illustrative sketch follows.
2026-01-29 14:21:54 +00:00
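A sketch of the shape of this change, assuming simple even sampling; in the spec, `get_ptc` selects members by balance-weighted sampling, and every name below is made up for illustration.

```go
package main

import "fmt"

// samplePTCPerCommittee draws members committee-by-committee until the
// PTC is full, instead of first building one concatenated slice of every
// beacon committee and sampling from that.
func samplePTCPerCommittee(committees [][]uint64, ptcSize int) []uint64 {
	ptc := make([]uint64, 0, ptcSize)
	if len(committees) == 0 {
		return ptc
	}
	perCommittee := ptcSize / len(committees)
	for _, committee := range committees {
		take := min(perCommittee, len(committee))
		// Placeholder: the spec picks members by weighted sampling,
		// not by taking a prefix.
		ptc = append(ptc, committee[:take]...)
		if len(ptc) >= ptcSize {
			break
		}
	}
	return ptc
}

func main() {
	committees := [][]uint64{{1, 2, 3}, {4, 5, 6}}
	fmt.Println(samplePTCPerCommittee(committees, 4)) // [1 2 4 5]
}
```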
801 changed files with 47218 additions and 9928 deletions

.ethspecify.yml

@@ -1,25 +1,39 @@
version: v1.7.0-alpha.1
version: v1.7.0-alpha.2
style: full
specrefs:
search_root: ..
search_root: .
auto_standardize_names: true
auto_add_missing_entries: true
require_exceptions_have_fork: true
files:
- configs.yml
- constants.yml
- containers.yml
- dataclasses.yml
- functions.yml
- presets.yml
- specrefs/configs.yml
- specrefs/constants.yml
- specrefs/containers.yml
- specrefs/dataclasses.yml
- specrefs/functions.yml
- specrefs/presets.yml
exceptions:
presets:
# Not implemented: gloas (future fork)
# gloas
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
- PTC_SIZE#gloas
constants:
# Constants in the KZG library
# phase0
- BASIS_POINTS#phase0
- ENDIANNESS#phase0
- MAX_CONCURRENT_REQUESTS#phase0
- UINT64_MAX#phase0
- UINT64_MAX_SQRT#phase0
# altair
- PARTICIPATION_FLAG_WEIGHTS#altair
# bellatrix
- SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix
# deneb
- BLS_MODULUS#deneb
- BYTES_PER_COMMITMENT#deneb
- BYTES_PER_FIELD_ELEMENT#deneb
@@ -33,18 +47,9 @@ exceptions:
- PRIMITIVE_ROOT_OF_UNITY#deneb
- RANDOM_CHALLENGE_KZG_BATCH_DOMAIN#deneb
- RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN#fulu
# Not implemented
- BASIS_POINTS#phase0
- ENDIANNESS#phase0
- MAX_CONCURRENT_REQUESTS#phase0
- PARTICIPATION_FLAG_WEIGHTS#altair
- SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix
# fulu
- UINT256_MAX#fulu
- UINT64_MAX#phase0
- UINT64_MAX_SQRT#phase0
# Not implemented: gloas (future fork)
# gloas
- BUILDER_PAYMENT_THRESHOLD_DENOMINATOR#gloas
- BUILDER_PAYMENT_THRESHOLD_NUMERATOR#gloas
- BUILDER_WITHDRAWAL_PREFIX#gloas
@@ -61,61 +66,60 @@ exceptions:
- PTC_TIMELINESS_INDEX#gloas
configs:
# Not implemented: gloas (future fork)
# gloas
- AGGREGATE_DUE_BPS_GLOAS#gloas
- ATTESTATION_DUE_BPS_GLOAS#gloas
- CONTRIBUTION_DUE_BPS_GLOAS#gloas
- GLOAS_FORK_EPOCH#gloas
- GLOAS_FORK_VERSION#gloas
- MAX_REQUEST_PAYLOADS#gloas
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
ssz_objects:
# Not implemented
# phase0
- Eth1Block#phase0
- MatrixEntry#fulu
# Not implemented: capella
# capella
- LightClientBootstrap#capella
- LightClientFinalityUpdate#capella
- LightClientOptimisticUpdate#capella
- LightClientUpdate#capella
# Not implemented: gloas (future fork)
# fulu
- MatrixEntry#fulu
# gloas
- BeaconBlockBody#gloas
- BeaconState#gloas
- Builder#gloas
- BuilderPendingPayment#gloas
- BuilderPendingWithdrawal#gloas
- DataColumnSidecar#gloas
- ExecutionPayloadEnvelope#gloas
- ExecutionPayloadBid#gloas
- ExecutionPayloadEnvelope#gloas
- ForkChoiceNode#gloas
- IndexedPayloadAttestation#gloas
- PayloadAttestation#gloas
- PayloadAttestationData#gloas
- PayloadAttestationMessage#gloas
- SignedExecutionPayloadEnvelope#gloas
- SignedExecutionPayloadBid#gloas
- Builder#gloas
- ProposerPreferences#gloas
- SignedExecutionPayloadBid#gloas
- SignedExecutionPayloadEnvelope#gloas
- SignedProposerPreferences#gloas
dataclasses:
# Not implemented
- BlobParameters#fulu
- ExpectedWithdrawals#capella
- ExpectedWithdrawals#electra
# phase0
- LatestMessage#phase0
- LightClientStore#altair
- OptimisticStore#bellatrix
- Store#phase0
# Not implemented: capella
# altair
- LightClientStore#altair
# bellatrix
- OptimisticStore#bellatrix
# capella
- ExpectedWithdrawals#capella
- LightClientStore#capella
# Not implemented: gloas (future fork)
# electra
- ExpectedWithdrawals#electra
# fulu
- BlobParameters#fulu
# gloas
- ExpectedWithdrawals#gloas
- LatestMessage#gloas
- Store#gloas
@@ -140,7 +144,6 @@ exceptions:
- g1_lincomb#deneb
- hash_to_bls_field#deneb
- is_power_of_two#deneb
- multi_exp#deneb
- reverse_bits#deneb
- validate_kzg_g1#deneb
- verify_blob_kzg_proof#deneb
@@ -175,7 +178,12 @@ exceptions:
- verify_cell_kzg_proof_batch#fulu
- verify_cell_kzg_proof_batch_impl#fulu
# Not implemented: phase0
# phase0
- update_proposer_boost_root#phase0
- is_proposer_equivocation#phase0
- record_block_timeliness#phase0
- compute_proposer_score#phase0
- get_attestation_score#phase0
- calculate_committee_fraction#phase0
- compute_fork_version#phase0
- compute_pulled_up_tip#phase0
@@ -221,8 +229,7 @@ exceptions:
- validate_on_attestation#phase0
- validate_target_epoch_against_current_time#phase0
- xor#phase0
# Not implemented: altair
# altair
- compute_merkle_proof#altair
- compute_sync_committee_period_at_slot#altair
- get_contribution_and_proof#altair
@@ -244,27 +251,29 @@ exceptions:
- process_sync_committee_contributions#altair
- set_or_append_list#altair
- validate_light_client_update#altair
# Not implemented: bellatrix
# bellatrix
- get_execution_payload#bellatrix
- is_merge_transition_block#bellatrix
- is_optimistic_candidate_block#bellatrix
- latest_verified_ancestor#bellatrix
- prepare_execution_payload#bellatrix
# Not implemented: capella
# capella
- apply_withdrawals#capella
- get_balance_after_withdrawals#capella
- get_lc_execution_root#capella
- get_validators_sweep_withdrawals#capella
- is_valid_light_client_header#capella
- prepare_execution_payload#capella
- process_epoch#capella
- update_next_withdrawal_index#capella
- update_next_withdrawal_validator_index#capella
- upgrade_lc_bootstrap_to_capella#capella
- upgrade_lc_finality_update_to_capella#capella
- upgrade_lc_header_to_capella#capella
- upgrade_lc_optimistic_update_to_capella#capella
- upgrade_lc_store_to_capella#capella
- upgrade_lc_update_to_capella#capella
# Not implemented: deneb
# deneb
- get_lc_execution_root#deneb
- is_valid_light_client_header#deneb
- prepare_execution_payload#deneb
@@ -274,33 +283,34 @@ exceptions:
- upgrade_lc_optimistic_update_to_deneb#deneb
- upgrade_lc_store_to_deneb#deneb
- upgrade_lc_update_to_deneb#deneb
# Not implemented: electra
# electra
- compute_weak_subjectivity_period#electra
- current_sync_committee_gindex_at_slot#electra
- finalized_root_gindex_at_slot#electra
- get_eth1_vote#electra
- get_lc_execution_root#electra
- get_pending_partial_withdrawals#electra
- get_validators_sweep_withdrawals#electra
- is_compounding_withdrawal_credential#electra
- is_eligible_for_partial_withdrawals#electra
- is_within_weak_subjectivity_period#electra
- next_sync_committee_gindex_at_slot#electra
- normalize_merkle_branch#electra
- prepare_execution_payload#electra
- update_pending_partial_withdrawals#electra
- upgrade_lc_bootstrap_to_electra#electra
- upgrade_lc_finality_update_to_electra#electra
- upgrade_lc_header_to_electra#electra
- upgrade_lc_optimistic_update_to_electra#electra
- upgrade_lc_store_to_electra#electra
- upgrade_lc_update_to_electra#electra
# Not implemented: fulu
# fulu
- compute_matrix#fulu
- get_blob_parameters#fulu
- get_data_column_sidecars_from_block#fulu
- get_data_column_sidecars_from_column_sidecar#fulu
- recover_matrix#fulu
# Not implemented: gloas (future fork)
# gloas
- compute_balance_weighted_acceptance#gloas
- compute_balance_weighted_selection#gloas
- compute_fork_version#gloas
@@ -368,49 +378,42 @@ exceptions:
- verify_execution_payload_bid_signature#gloas
- add_builder_to_registry#gloas
- apply_deposit_for_builder#gloas
- apply_withdrawals#capella
- apply_withdrawals#gloas
- can_builder_cover_bid#gloas
- compute_proposer_score#phase0
- convert_builder_index_to_validator_index#gloas
- convert_validator_index_to_builder_index#gloas
- get_attestation_score#gloas
- get_attestation_score#phase0
- get_balance_after_withdrawals#capella
- get_builder_from_deposit#gloas
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
- get_pending_balance_to_withdraw_for_builder#gloas
- get_pending_partial_withdrawals#electra
- get_proposer_preferences_signature#gloas
- get_upcoming_proposal_slots#gloas
- get_validators_sweep_withdrawals#capella
- get_validators_sweep_withdrawals#electra
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_data_available#gloas
- is_eligible_for_partial_withdrawals#electra
- is_head_late#gloas
- is_head_weak#gloas
- is_parent_strong#gloas
- is_proposer_equivocation#phase0
- is_valid_proposal_slot#gloas
- onboard_builders_from_pending_deposits#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- verify_data_column_sidecar_kzg_proofs#gloas
- should_apply_proposer_boost#gloas
- update_builder_pending_withdrawals#gloas
- update_next_withdrawal_builder_index#gloas
- update_next_withdrawal_index#capella
- update_next_withdrawal_validator_index#capella
- update_payload_expected_withdrawals#gloas
- update_pending_partial_withdrawals#electra
- update_proposer_boost_root#gloas
- update_proposer_boost_root#phase0
presets:
# gloas
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
- BUILDER_REGISTRY_LIMIT#gloas
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas

(check-specrefs GitHub Actions workflow)

@@ -12,11 +12,11 @@ jobs:
- name: Check version consistency
run: |
WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
ETHSPECIFY_VERSION=$(grep '^version:' .ethspecify.yml | sed 's/version: //')
if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
echo "Version mismatch between WORKSPACE and ethspecify"
echo " WORKSPACE: $WORKSPACE_VERSION"
echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
echo " .ethspecify.yml: $ETHSPECIFY_VERSION"
exit 1
else
echo "Versions match: $WORKSPACE_VERSION"
@@ -26,7 +26,7 @@ jobs:
run: python3 -mpip install ethspecify
- name: Update spec references
run: ethspecify process --path=specrefs
run: ethspecify
- name: Check for differences
run: |
@@ -40,4 +40,4 @@ jobs:
fi
- name: Check spec references
run: ethspecify check --path=specrefs
run: ethspecify check

(Protobuf Format GitHub Actions workflow)

@@ -1,6 +1,6 @@
name: Protobuf Format
on:
on:
push:
branches: [ '*' ]
pull_request:
@@ -12,10 +12,14 @@ jobs:
clang-format-checking:
runs-on: ubuntu-4
steps:
- uses: actions/checkout@v2
# Is this step failing for you?
- uses: actions/checkout@v6
# Is this step failing for you?
# Run: clang-format -i proto/**/*.proto
# See: https://clang.llvm.org/docs/ClangFormat.html
- uses: RafikFarhad/clang-format-github-action@v3
with:
sources: "proto/**/*.proto"
- name: Install clang-format
run: |
sudo apt-get update -qq
sudo apt-get install -y clang-format
- name: Check protobuf formatting
run: |
clang-format --style=LLVM --dry-run --Werror proto/**/*.proto

(Go GitHub Actions workflow)

@@ -2,7 +2,7 @@ name: Go
on:
push:
branches: [ master ]
branches: [ master, develop ]
pull_request:
branches: [ '*' ]
merge_group:

.gitignore

@@ -44,6 +44,3 @@ tmp
# spectest coverage reports
report.txt
# execution client data
execution/

(golangci-lint formatters configuration)

@@ -33,9 +33,8 @@ formatters:
generated: lax
paths:
- validator/web/site_data.go
- .*_test.go
- proto
- tools/analyzers
- third_party$
- builtin$
- examples$
- examples$

WORKSPACE

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.1"
consensus_spec_version = "v1.7.0-alpha.2"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
"minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
"mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

api/fallback/BUILD.bazel (new file)

@@ -0,0 +1,19 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"fallback.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/fallback",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["fallback_test.go"],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
)

api/fallback/fallback.go (new file)

@@ -0,0 +1,66 @@
package fallback
import (
"context"
"github.com/sirupsen/logrus"
)
// HostProvider is the subset of connection-provider methods that EnsureReady
// needs. Both grpc.GrpcConnectionProvider and rest.RestConnectionProvider
// satisfy this interface.
type HostProvider interface {
Hosts() []string
CurrentHost() string
SwitchHost(index int) error
}
// ReadyChecker can report whether the current endpoint is ready.
// iface.NodeClient satisfies this implicitly.
type ReadyChecker interface {
IsReady(ctx context.Context) bool
}
// EnsureReady iterates through the configured hosts and returns true as soon as
// one responds as ready. It starts from the provider's current host and wraps
// around using modular arithmetic, performing failover when a host is not ready.
func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecker) bool {
hosts := provider.Hosts()
numHosts := len(hosts)
startingHost := provider.CurrentHost()
var attemptedHosts []string
// Find current index
currentIdx := 0
for i, h := range hosts {
if h == startingHost {
currentIdx = i
break
}
}
for i := range numHosts {
if checker.IsReady(ctx) {
if len(attemptedHosts) > 0 {
log.WithFields(logrus.Fields{
"previous": startingHost,
"current": provider.CurrentHost(),
"tried": attemptedHosts,
}).Info("Switched to responsive beacon node")
}
return true
}
attemptedHosts = append(attemptedHosts, provider.CurrentHost())
// Try next host if not the last iteration
if i < numHosts-1 {
nextIdx := (currentIdx + i + 1) % numHosts
if err := provider.SwitchHost(nextIdx); err != nil {
log.WithError(err).Error("Failed to switch host")
}
}
}
log.WithField("tried", attemptedHosts).Warn("No responsive beacon node found")
return false
}

api/fallback/fallback_test.go (new file)

@@ -0,0 +1,94 @@
package fallback
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
)
// mockHostProvider is a minimal HostProvider for unit tests.
type mockHostProvider struct {
hosts []string
hostIndex int
}
func (m *mockHostProvider) Hosts() []string { return m.hosts }
func (m *mockHostProvider) CurrentHost() string {
return m.hosts[m.hostIndex%len(m.hosts)]
}
func (m *mockHostProvider) SwitchHost(index int) error { m.hostIndex = index; return nil }
// mockReadyChecker records per-call IsReady results in sequence.
type mockReadyChecker struct {
results []bool
idx int
}
func (m *mockReadyChecker) IsReady(_ context.Context) bool {
if m.idx >= len(m.results) {
return false
}
r := m.results[m.idx]
m.idx++
return r
}
func TestEnsureReady_SingleHostReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_SingleHostNotReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_SingleHostError(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_MultipleHostsFirstReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsFailoverToSecond(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 1, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsNoneReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, false, false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_WrapAroundFromNonZeroIndex(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
hostIndex: 1,
}
// host1 (start) fails, host2 fails, host0 succeeds
checker := &mockReadyChecker{results: []bool{false, false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}

api/fallback/log.go (new file)

@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package fallback
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/fallback")

api/grpc/BUILD.bazel

@@ -3,13 +3,16 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"grpc_connection_provider.go",
"grpcutils.go",
"log.go",
"mock_grpc_provider.go",
"parameters.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
visibility = ["//visibility:public"],
deps = [
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
@@ -18,12 +21,17 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["grpcutils_test.go"],
srcs = [
"grpc_connection_provider_test.go",
"grpcutils_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//credentials/insecure:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
],
)

api/grpc/grpc_connection_provider.go (new file)

@@ -0,0 +1,186 @@
package grpc
import (
"context"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
// CurrentConn returns the currently active gRPC connection.
// The connection is created lazily on first call.
// Returns nil if the provider has been closed.
CurrentConn() *grpc.ClientConn
// CurrentHost returns the address of the currently active endpoint.
CurrentHost() string
// Hosts returns all configured endpoint addresses.
Hosts() []string
// SwitchHost switches to the endpoint at the given index.
// The new connection is created lazily on next CurrentConn() call.
SwitchHost(index int) error
// ConnectionCounter returns a monotonically increasing counter that increments
// each time SwitchHost changes the active endpoint. This allows consumers to
// detect connection changes even when the host string returns to a previous value
// (e.g., host0 → host1 → host0).
ConnectionCounter() uint64
// Close closes the current connection.
Close()
}
type grpcConnectionProvider struct {
// Immutable after construction - no lock needed for reads
endpoints []string
ctx context.Context
dialOpts []grpc.DialOption
// Current connection state (protected by mutex)
currentIndex uint64
conn *grpc.ClientConn
connCounter uint64
mu sync.Mutex
closed bool
}
// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
ctx context.Context,
endpoint string,
dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
endpoints := parseEndpoints(endpoint)
if len(endpoints) == 0 {
return nil, errors.New("no gRPC endpoints provided")
}
log.WithFields(logrus.Fields{
"endpoints": endpoints,
"count": len(endpoints),
}).Info("Initialized gRPC connection provider")
return &grpcConnectionProvider{
endpoints: endpoints,
ctx: ctx,
dialOpts: dialOpts,
}, nil
}
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
if endpoint == "" {
return nil
}
endpoints := make([]string, 0, 1)
for p := range strings.SplitSeq(endpoint, ",") {
if p = strings.TrimSpace(p); p != "" {
endpoints = append(endpoints, p)
}
}
return endpoints
}
func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return nil
}
// Return existing connection if available
if p.conn != nil {
return p.conn
}
// Create connection lazily
ep := p.endpoints[p.currentIndex]
conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
if err != nil {
log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
return nil
}
p.conn = conn
log.WithField("endpoint", ep).Debug("Created gRPC connection")
return conn
}
func (p *grpcConnectionProvider) CurrentHost() string {
p.mu.Lock()
defer p.mu.Unlock()
return p.endpoints[p.currentIndex]
}
func (p *grpcConnectionProvider) Hosts() []string {
// Return a copy to maintain immutability
hosts := make([]string, len(p.endpoints))
copy(hosts, p.endpoints)
return hosts
}
func (p *grpcConnectionProvider) SwitchHost(index int) error {
if index < 0 || index >= len(p.endpoints) {
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
}
p.mu.Lock()
defer p.mu.Unlock()
if uint64(index) == p.currentIndex {
return nil // Already on this host
}
oldHost := p.endpoints[p.currentIndex]
oldConn := p.conn
p.conn = nil // Clear immediately - new connection created lazily
p.currentIndex = uint64(index)
p.connCounter++
// Close old connection asynchronously to avoid blocking the caller
if oldConn != nil {
go func() {
if err := oldConn.Close(); err != nil {
log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
}
}()
}
log.WithFields(logrus.Fields{
"previousHost": oldHost,
"newHost": p.endpoints[index],
}).Debug("Switched gRPC endpoint")
return nil
}
func (p *grpcConnectionProvider) ConnectionCounter() uint64 {
p.mu.Lock()
defer p.mu.Unlock()
return p.connCounter
}
func (p *grpcConnectionProvider) Close() {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return
}
p.closed = true
if p.conn != nil {
if err := p.conn.Close(); err != nil {
log.WithError(err).WithField("endpoint", p.endpoints[p.currentIndex]).Debug("Failed to close gRPC connection")
}
p.conn = nil
}
}
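
For orientation, here is a hedged usage sketch of the provider above. It is not part of this change: the import alias is an assumption (the importpath is not shown in this hunk) and the endpoints are examples. With the default non-blocking dial, CurrentConn typically returns nil only for malformed targets, so real outages surface on the first RPC; the caller then rotates with SwitchHost, and ConnectionCounter lets long-lived consumers notice the switch even when the host string repeats.

package main

import (
	"context"
	"log"

	grpcapi "github.com/OffchainLabs/prysm/v7/api/grpc" // assumed import path for this package
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	provider, err := grpcapi.NewGrpcConnectionProvider(ctx, "host1:4000,host2:4000", dialOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer provider.Close()

	before := provider.ConnectionCounter()
	if conn := provider.CurrentConn(); conn != nil {
		log.Printf("dialed %s lazily, state=%v", provider.CurrentHost(), conn.GetState())
	}
	// Suppose an RPC against the current host just failed: fail over.
	if err := provider.SwitchHost(1); err != nil { // illustrative: rotate from index 0 to 1
		log.Fatal(err)
	}
	if after := provider.ConnectionCounter(); after != before {
		log.Printf("switched to %s; per-connection stubs should be rebuilt", provider.CurrentHost())
	}
}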

View File

@@ -0,0 +1,207 @@
package grpc
import (
"context"
"net"
"reflect"
"strings"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
func TestParseEndpoints(t *testing.T) {
tests := []struct {
name string
input string
expected []string
}{
{"single endpoint", "localhost:4000", []string{"localhost:4000"}},
{"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
{"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
{"empty string", "", nil},
{"only commas", ",,,", []string{}},
{"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
{"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := parseEndpoints(tt.input)
if !reflect.DeepEqual(tt.expected, got) {
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
}
})
}
}
func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
t.Run("no endpoints", func(t *testing.T) {
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
_, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
require.ErrorContains(t, "no gRPC endpoints provided", err)
})
}
func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
// Start only one server but configure provider with two endpoints
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
defer server.Stop()
validAddr := lis.Addr().String()
invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening
// Provider should succeed even though second endpoint is invalid (lazy connections)
endpoint := validAddr + "," + invalidAddr
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err, "Provider creation should succeed with lazy connections")
defer func() { provider.Close() }()
// First endpoint should work
conn := provider.CurrentConn()
assert.NotNil(t, conn, "First connection should be created lazily")
}
func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
// Create provider with 3 endpoints
var addrs []string
var servers []*grpc.Server
for range 3 {
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
addrs = append(addrs, lis.Addr().String())
servers = append(servers, server)
}
defer func() {
for _, s := range servers {
s.Stop()
}
}()
endpoint := strings.Join(addrs, ",")
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err)
defer func() { provider.Close() }()
// Access the internal state to verify single connection behavior
p := provider.(*grpcConnectionProvider)
// Initially no connection
p.mu.Lock()
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
p.mu.Unlock()
// Access connection - should create one
conn0 := provider.CurrentConn()
assert.NotNil(t, conn0)
p.mu.Lock()
assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
firstConn := p.conn
p.mu.Unlock()
// Call CurrentConn again - should return same connection
conn0Again := provider.CurrentConn()
assert.Equal(t, conn0, conn0Again, "Should return same connection")
// Switch to different host - old connection should be closed, new one created lazily
require.NoError(t, provider.SwitchHost(1))
p.mu.Lock()
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SwitchHost (lazy)")
p.mu.Unlock()
// Get new connection
conn1 := provider.CurrentConn()
assert.NotNil(t, conn1)
assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}
// testProvider creates a provider with n test servers and returns a cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
var addrs []string
var cleanups []func()
for range n {
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
addrs = append(addrs, lis.Addr().String())
cleanups = append(cleanups, server.Stop)
}
endpoint := strings.Join(addrs, ",")
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err)
cleanup := func() {
provider.Close()
for _, c := range cleanups {
c()
}
}
return provider, addrs, cleanup
}
func TestGrpcConnectionProvider(t *testing.T) {
provider, addrs, cleanup := testProvider(t, 3)
defer cleanup()
t.Run("initial state", func(t *testing.T) {
assert.Equal(t, 3, len(provider.Hosts()))
assert.Equal(t, addrs[0], provider.CurrentHost())
assert.NotNil(t, provider.CurrentConn())
})
t.Run("SwitchHost", func(t *testing.T) {
require.NoError(t, provider.SwitchHost(1))
assert.Equal(t, addrs[1], provider.CurrentHost())
assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
require.NoError(t, provider.SwitchHost(0))
assert.Equal(t, addrs[0], provider.CurrentHost())
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
})
t.Run("SwitchHost circular", func(t *testing.T) {
// Test round-robin style switching using SwitchHost with manual index
indices := []int{1, 2, 0, 1} // Simulate circular switching
for i, idx := range indices {
require.NoError(t, provider.SwitchHost(idx))
assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
}
})
t.Run("Hosts returns copy", func(t *testing.T) {
hosts := provider.Hosts()
original := hosts[0]
hosts[0] = "modified"
assert.Equal(t, original, provider.Hosts()[0])
})
}
func TestGrpcConnectionProvider_Close(t *testing.T) {
provider, _, cleanup := testProvider(t, 1)
defer cleanup()
assert.NotNil(t, provider.CurrentConn())
provider.Close()
assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
provider.Close() // Double close is safe
}

View File

@@ -0,0 +1,27 @@
package grpc
import "google.golang.org/grpc"
// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
MockConn *grpc.ClientConn
MockHosts []string
CurrentIndex int
ConnCounter uint64
}
func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.CurrentIndex]
}
return ""
}
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(idx int) error {
m.CurrentIndex = idx
m.ConnCounter++
return nil
}
func (m *MockGrpcProvider) ConnectionCounter() uint64 { return m.ConnCounter }
func (m *MockGrpcProvider) Close() {}
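
A hedged sketch of how the mock slots into a unit test (the test name and assertions are illustrative; it assumes the same testing/require helpers used elsewhere in this diff). Note the mock deliberately skips bounds checks and counts every SwitchHost call, which is enough for exercising failover logic in callers:

func TestSwitchHostBumpsCounter(t *testing.T) {
	m := &MockGrpcProvider{MockHosts: []string{"host1:4000", "host2:4000"}}
	require.Equal(t, "host1:4000", m.CurrentHost())
	require.NoError(t, m.SwitchHost(1))
	require.Equal(t, "host2:4000", m.CurrentHost())
	require.Equal(t, uint64(1), m.ConnectionCounter())
}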

api/rest/BUILD.bazel Normal file
View File

@@ -0,0 +1,34 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"mock_rest_provider.go",
"rest_connection_provider.go",
"rest_handler.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/apiutil:go_default_library",
"//api/client:go_default_library",
"//config/params:go_default_library",
"//network/httputil:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["rest_connection_provider_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

api/rest/log.go Normal file
View File

@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package rest
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/rest")

View File

@@ -0,0 +1,46 @@
package rest
import (
"bytes"
"context"
"net/http"
)
// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
MockClient *http.Client
MockHandler Handler
MockHosts []string
HostIndex int
}
func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) Handler() Handler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.HostIndex%len(m.MockHosts)]
}
return ""
}
func (m *MockRestProvider) Hosts() []string { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }
// MockHandler implements Handler for testing.
type MockHandler struct {
MockHost string
}
func (m *MockHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
return http.StatusOK, nil
}
func (m *MockHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
return nil
}
func (m *MockHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Host() string { return m.MockHost }

View File

@@ -0,0 +1,158 @@
package rest
import (
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/OffchainLabs/prysm/v7/api/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
// HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
HttpClient() *http.Client
// Handler returns the REST handler for making API requests.
Handler() Handler
// CurrentHost returns the current REST API endpoint URL.
CurrentHost() string
// Hosts returns all configured REST API endpoint URLs.
Hosts() []string
// SwitchHost switches to the endpoint at the given index.
SwitchHost(index int) error
}
// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)
// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.timeout = timeout
}
}
// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.headers = headers
}
}
// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.enableTracing = true
}
}
type restConnectionProvider struct {
endpoints []string
httpClient *http.Client
restHandler *handler
currentIndex atomic.Uint64
timeout time.Duration
headers map[string][]string
enableTracing bool
}
// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
endpoints := parseEndpoints(endpoint)
if len(endpoints) == 0 {
return nil, errors.New("no REST API endpoints provided")
}
p := &restConnectionProvider{
endpoints: endpoints,
}
for _, opt := range opts {
opt(p)
}
// Build the HTTP transport chain
var transport http.RoundTripper = http.DefaultTransport
// Add custom headers if configured
if len(p.headers) > 0 {
transport = client.NewCustomHeadersTransport(transport, p.headers)
}
// Add tracing if enabled
if p.enableTracing {
transport = otelhttp.NewTransport(transport)
}
p.httpClient = &http.Client{
Timeout: p.timeout,
Transport: transport,
}
// Create the REST handler with the HTTP client and initial host
p.restHandler = newHandler(*p.httpClient, endpoints[0])
log.WithFields(logrus.Fields{
"endpoints": endpoints,
"count": len(endpoints),
}).Info("Initialized REST connection provider")
return p, nil
}
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
if endpoint == "" {
return nil
}
endpoints := make([]string, 0, 1)
for p := range strings.SplitSeq(endpoint, ",") {
if p = strings.TrimSpace(p); p != "" {
endpoints = append(endpoints, p)
}
}
return endpoints
}
func (p *restConnectionProvider) HttpClient() *http.Client {
return p.httpClient
}
func (p *restConnectionProvider) Handler() Handler {
return p.restHandler
}
func (p *restConnectionProvider) CurrentHost() string {
return p.endpoints[p.currentIndex.Load()]
}
func (p *restConnectionProvider) Hosts() []string {
// Return a copy to maintain immutability
hosts := make([]string, len(p.endpoints))
copy(hosts, p.endpoints)
return hosts
}
func (p *restConnectionProvider) SwitchHost(index int) error {
if index < 0 || index >= len(p.endpoints) {
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
}
oldIdx := p.currentIndex.Load()
p.currentIndex.Store(uint64(index))
// Update the rest handler's host
p.restHandler.SwitchHost(p.endpoints[index])
log.WithFields(logrus.Fields{
"previousHost": p.endpoints[oldIdx],
"newHost": p.endpoints[index],
}).Debug("Switched REST endpoint")
return nil
}
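
A hedged usage sketch follows (not part of this change): the health-probe path and failover policy are assumptions, while the constructor, options, Handler, and SwitchHost calls are exactly the API added above. The importpath matches the BUILD file earlier in this diff.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
	provider, err := rest.NewRestConnectionProvider(
		"http://host1:3500,http://host2:3500",
		rest.WithHttpTimeout(10*time.Second),
		rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer token"}}),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Probe the current host; rotate on error or 503 (see the GetStatusCode docs below).
	code, err := provider.Handler().GetStatusCode(context.Background(), "/eth/v1/node/health")
	if err != nil || code == http.StatusServiceUnavailable {
		if err := provider.SwitchHost(1); err != nil { // illustrative: advance from index 0
			log.Fatal(err)
		}
		log.Printf("failed over to %s", provider.CurrentHost())
	}
}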

View File

@@ -0,0 +1,80 @@
package rest
import (
"reflect"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestParseEndpoints(t *testing.T) {
tests := []struct {
name string
input string
expected []string
}{
{"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
{"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
{"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
{"empty string", "", nil},
{"only commas", ",,,", []string{}},
{"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
{"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := parseEndpoints(tt.input)
if !reflect.DeepEqual(tt.expected, got) {
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
}
})
}
}
func TestNewRestConnectionProvider_Errors(t *testing.T) {
t.Run("no endpoints", func(t *testing.T) {
_, err := NewRestConnectionProvider("")
require.ErrorContains(t, "no REST API endpoints provided", err)
})
}
func TestRestConnectionProvider(t *testing.T) {
provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
require.NoError(t, err)
t.Run("initial state", func(t *testing.T) {
assert.Equal(t, 3, len(provider.Hosts()))
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
assert.NotNil(t, provider.HttpClient())
})
t.Run("SwitchHost", func(t *testing.T) {
require.NoError(t, provider.SwitchHost(1))
assert.Equal(t, "http://host2:3500", provider.CurrentHost())
require.NoError(t, provider.SwitchHost(0))
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
})
t.Run("Hosts returns copy", func(t *testing.T) {
hosts := provider.Hosts()
original := hosts[0]
hosts[0] = "modified"
assert.Equal(t, original, provider.Hosts()[0])
})
}
func TestRestConnectionProvider_WithOptions(t *testing.T) {
headers := map[string][]string{"Authorization": {"Bearer token"}}
provider, err := NewRestConnectionProvider(
"http://localhost:3500",
WithHttpHeaders(headers),
WithHttpTimeout(30*time.Second),
WithTracing(),
)
require.NoError(t, err)
assert.NotNil(t, provider.HttpClient())
assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}

View File

@@ -1,4 +1,4 @@
package beacon_api
package rest
import (
"bytes"
@@ -21,37 +21,46 @@ import (
type reqOption func(*http.Request)
type RestHandler interface {
// Handler defines the interface for making REST API requests.
type Handler interface {
Get(ctx context.Context, endpoint string, resp any) error
GetStatusCode(ctx context.Context, endpoint string) (int, error)
GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error)
Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error
PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error)
HttpClient() *http.Client
Host() string
SetHost(host string)
}
type BeaconApiRestHandler struct {
type handler struct {
client http.Client
host string
reqOverrides []reqOption
}
// NewBeaconApiRestHandler returns a RestHandler
func NewBeaconApiRestHandler(client http.Client, host string) RestHandler {
brh := &BeaconApiRestHandler{
// newHandler returns a *handler for internal use within the rest package.
func newHandler(client http.Client, host string) *handler {
rh := &handler{
client: client,
host: host,
}
brh.appendAcceptOverride()
return brh
rh.appendAcceptOverride()
return rh
}
// NewHandler returns a Handler that serves REST requests against the given host using the provided HTTP client.
func NewHandler(client http.Client, host string) Handler {
rh := &handler{
client: client,
host: host,
}
rh.appendAcceptOverride()
return rh
}
// appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
// This is specified as an env var because it is a niche option that prysm may use for performance testing or
// debugging but which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
func (c *BeaconApiRestHandler) appendAcceptOverride() {
func (c *handler) appendAcceptOverride() {
if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
req.Header.Set("Accept", accept)
@@ -60,18 +69,18 @@ func (c *BeaconApiRestHandler) appendAcceptOverride() {
}
// HttpClient returns the underlying HTTP client of the handler
func (c *BeaconApiRestHandler) HttpClient() *http.Client {
func (c *handler) HttpClient() *http.Client {
return &c.client
}
// Host returns the underlying HTTP host
func (c *BeaconApiRestHandler) Host() string {
func (c *handler) Host() string {
return c.host
}
// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp any) error {
func (c *handler) Get(ctx context.Context, endpoint string, resp any) error {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -94,7 +103,7 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp an
// GetStatusCode sends a GET request and returns only the HTTP status code.
// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
func (c *handler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -113,7 +122,7 @@ func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint strin
return httpResp.StatusCode, nil
}
func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
func (c *handler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -168,7 +177,7 @@ func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]b
// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Post(
func (c *handler) Post(
ctx context.Context,
apiEndpoint string,
headers map[string]string,
@@ -204,7 +213,7 @@ func (c *BeaconApiRestHandler) Post(
}
// PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
func (c *BeaconApiRestHandler) PostSSZ(
func (c *handler) PostSSZ(
ctx context.Context,
apiEndpoint string,
headers map[string]string,
@@ -305,6 +314,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
return nil
}
func (c *BeaconApiRestHandler) SetHost(host string) {
func (c *handler) SwitchHost(host string) {
c.host = host
}
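
Because the Accept override earlier in this file is read once at construction, it has to be in the environment before the handler is built. A hedged sketch follows; the media type and endpoint are illustrative, and params.EnvNameOverrideAccept is the variable's name constant (its string value is not shown in this diff):

package main

import (
	"context"
	"net/http"
	"os"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/rest"
	"github.com/OffchainLabs/prysm/v7/config/params"
)

func main() {
	// Must happen before NewHandler: the override is captured in appendAcceptOverride.
	if err := os.Setenv(params.EnvNameOverrideAccept, "application/octet-stream"); err != nil {
		panic(err)
	}
	h := rest.NewHandler(http.Client{Timeout: 10 * time.Second}, "http://localhost:3500")
	var resp any
	// Every request now carries the overridden Accept header.
	_ = h.Get(context.Background(), "/eth/v1/node/version", &resp)
}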

View File

@@ -92,7 +92,11 @@ func AcceptEncodingHeaderHandler() Middleware {
return
}
gz := gzip.NewWriter(w)
gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed)
if err != nil {
next.ServeHTTP(w, r)
return
}
gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
defer func() {
if !gzipRW.zip {
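
As a side note on the hunk above: gzip.NewWriterLevel fails only when the level is out of range, so with the constant gzip.BestSpeed the error branch is purely defensive, and the middleware degrades to an uncompressed response rather than breaking the request. A minimal standalone sketch of the same pattern (hypothetical names; it skips the Accept-Encoding negotiation and the zip-tracking that the real gzipResponseWriter performs):

package middleware

import (
	"compress/gzip"
	"net/http"
)

type gzipWriter struct {
	http.ResponseWriter
	gz *gzip.Writer
}

func (g gzipWriter) Write(b []byte) (int, error) { return g.gz.Write(b) }

func compress(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed)
		if err != nil {
			next.ServeHTTP(w, r) // defensive: serve uncompressed on failure
			return
		}
		defer gz.Close() // flushes buffered compressed bytes
		w.Header().Set("Content-Encoding", "gzip")
		next.ServeHTTP(gzipWriter{ResponseWriter: w, gz: gz}, r)
	})
}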

View File

@@ -9,6 +9,7 @@ go_library(
"conversions_blob.go",
"conversions_block.go",
"conversions_block_execution.go",
"conversions_gloas.go",
"conversions_lightclient.go",
"conversions_state.go",
"endpoints_beacon.go",
@@ -57,10 +58,12 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//consensus-types/blocks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],

View File

@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
// ----------------------------------------------------------------------------
type ExecutionPayloadBid struct {
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type SignedExecutionPayloadBid struct {
@@ -577,3 +577,17 @@ func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
func (s *SignedBeaconBlockGloas) SigString() string {
return s.Signature
}
type ExecutionPayloadEnvelope struct {
Payload *ExecutionPayloadDeneb `json:"payload"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
BuilderIndex string `json:"builder_index"`
BeaconBlockRoot string `json:"beacon_block_root"`
Slot string `json:"slot"`
StateRoot string `json:"state_root"`
}
type SignedExecutionPayloadEnvelope struct {
Message *ExecutionPayloadEnvelope `json:"message"`
Signature string `json:"signature"`
}
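
To make the wire change concrete: the single blob_kzg_commitments_root field is gone and the full commitment list is now serialized. A hedged sketch of the resulting JSON shape follows; the import path for this structs package is an assumption (it is not shown in this hunk), the commitment value is a placeholder rather than a real 48-byte KZG commitment, and unset fields marshal as empty strings because the struct declares no omitempty tags.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/api/server/structs" // assumed import path
)

func main() {
	bid := &structs.ExecutionPayloadBid{
		Slot:               "9",
		Value:              "11",
		BlobKzgCommitments: []string{"0x06"}, // placeholder; real commitments are 48 bytes of hex
	}
	out, err := json.MarshalIndent(bid, "", "  ")
	if err != nil {
		panic(err)
	}
	// "blob_kzg_commitments" now renders as a JSON array, not a single root string.
	fmt.Println(string(out))
}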

View File

@@ -2939,18 +2939,22 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
}
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
for i := range b.BlobKzgCommitments {
blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
}
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitments: blobKzgCommitments,
}
}
@@ -3187,22 +3191,30 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayment")
}
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
if err != nil {
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
return nil, server.NewDecodeError(err, "BlobKzgCommitments")
}
blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
for i, commitment := range b.BlobKzgCommitments {
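// KZG commitments are 48 bytes, the same size as a BLS public key, so BLSPubkeyLength doubles as the expected commitment length here.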
kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
}
blobKzgCommitments[i] = kzg
}
return &eth.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitments: blobKzgCommitments,
}, nil
}
@@ -3263,3 +3275,26 @@ func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, err
BlobDataAvailable: d.BlobDataAvailable,
}, nil
}
// SignedExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
payload, err := ExecutionPayloadDenebFromConsensus(e.Message.Payload)
if err != nil {
return nil, err
}
var requests *ExecutionRequests
if e.Message.ExecutionRequests != nil {
requests = ExecutionRequestsFromConsensus(e.Message.ExecutionRequests)
}
return &SignedExecutionPayloadEnvelope{
Message: &ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: requests,
BuilderIndex: fmt.Sprintf("%d", e.Message.BuilderIndex),
BeaconBlockRoot: hexutil.Encode(e.Message.BeaconBlockRoot),
Slot: fmt.Sprintf("%d", e.Message.Slot),
StateRoot: hexutil.Encode(e.Message.StateRoot),
},
Signature: hexutil.Encode(e.Signature),
}, nil
}

View File

@@ -0,0 +1,89 @@
package structs
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
)
func ROExecutionPayloadBidFromConsensus(b interfaces.ROExecutionPayloadBid) *ExecutionPayloadBid {
if b == nil {
return nil
}
pbh := b.ParentBlockHash()
pbr := b.ParentBlockRoot()
bh := b.BlockHash()
pr := b.PrevRandao()
fr := b.FeeRecipient()
commitments := b.BlobKzgCommitments()
blobKzgCommitments := make([]string, 0, len(commitments))
for _, commitment := range commitments {
blobKzgCommitments = append(blobKzgCommitments, hexutil.Encode(commitment))
}
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(pbh[:]),
ParentBlockRoot: hexutil.Encode(pbr[:]),
BlockHash: hexutil.Encode(bh[:]),
PrevRandao: hexutil.Encode(pr[:]),
FeeRecipient: hexutil.Encode(fr[:]),
GasLimit: fmt.Sprintf("%d", b.GasLimit()),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex()),
Slot: fmt.Sprintf("%d", b.Slot()),
Value: fmt.Sprintf("%d", b.Value()),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment()),
BlobKzgCommitments: blobKzgCommitments,
}
}
func BuildersFromConsensus(builders []*ethpb.Builder) []*Builder {
newBuilders := make([]*Builder, len(builders))
for i, b := range builders {
newBuilders[i] = BuilderFromConsensus(b)
}
return newBuilders
}
func BuilderFromConsensus(b *ethpb.Builder) *Builder {
return &Builder{
Pubkey: hexutil.Encode(b.Pubkey),
Version: hexutil.Encode(b.Version),
ExecutionAddress: hexutil.Encode(b.ExecutionAddress),
Balance: fmt.Sprintf("%d", b.Balance),
DepositEpoch: fmt.Sprintf("%d", b.DepositEpoch),
WithdrawableEpoch: fmt.Sprintf("%d", b.WithdrawableEpoch),
}
}
func BuilderPendingPaymentsFromConsensus(payments []*ethpb.BuilderPendingPayment) []*BuilderPendingPayment {
newPayments := make([]*BuilderPendingPayment, len(payments))
for i, p := range payments {
newPayments[i] = BuilderPendingPaymentFromConsensus(p)
}
return newPayments
}
func BuilderPendingPaymentFromConsensus(p *ethpb.BuilderPendingPayment) *BuilderPendingPayment {
return &BuilderPendingPayment{
Weight: fmt.Sprintf("%d", p.Weight),
Withdrawal: BuilderPendingWithdrawalFromConsensus(p.Withdrawal),
}
}
func BuilderPendingWithdrawalsFromConsensus(withdrawals []*ethpb.BuilderPendingWithdrawal) []*BuilderPendingWithdrawal {
newWithdrawals := make([]*BuilderPendingWithdrawal, len(withdrawals))
for i, w := range withdrawals {
newWithdrawals[i] = BuilderPendingWithdrawalFromConsensus(w)
}
return newWithdrawals
}
func BuilderPendingWithdrawalFromConsensus(w *ethpb.BuilderPendingWithdrawal) *BuilderPendingWithdrawal {
return &BuilderPendingWithdrawal{
FeeRecipient: hexutil.Encode(w.FeeRecipient),
Amount: fmt.Sprintf("%d", w.Amount),
BuilderIndex: fmt.Sprintf("%d", w.BuilderIndex),
}
}

View File

@@ -972,3 +972,223 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
ProposerLookahead: lookahead,
}, nil
}
// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------
func BeaconStateGloasFromConsensus(st beaconState.BeaconState) (*BeaconStateGloas, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr := st.HistoricalRoots()
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
srcLookahead, err := st.ProposerLookahead()
if err != nil {
return nil, err
}
lookahead := make([]string, len(srcLookahead))
for i, v := range srcLookahead {
lookahead[i] = fmt.Sprintf("%d", uint64(v))
}
// Gloas-specific fields
lepb, err := st.LatestExecutionPayloadBid()
if err != nil {
return nil, err
}
builders, err := st.Builders()
if err != nil {
return nil, err
}
nwbi, err := st.NextWithdrawalBuilderIndex()
if err != nil {
return nil, err
}
epa, err := st.ExecutionPayloadAvailabilityVector()
if err != nil {
return nil, err
}
bpp, err := st.BuilderPendingPayments()
if err != nil {
return nil, err
}
bpw, err := st.BuilderPendingWithdrawals()
if err != nil {
return nil, err
}
lbh, err := st.LatestBlockHash()
if err != nil {
return nil, err
}
pew, err := st.PayloadExpectedWithdrawals()
if err != nil {
return nil, err
}
return &BeaconStateGloas{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
ProposerLookahead: lookahead,
LatestExecutionPayloadBid: ROExecutionPayloadBidFromConsensus(lepb),
Builders: BuildersFromConsensus(builders),
NextWithdrawalBuilderIndex: fmt.Sprintf("%d", nwbi),
ExecutionPayloadAvailability: hexutil.Encode(epa),
BuilderPendingPayments: BuilderPendingPaymentsFromConsensus(bpp),
BuilderPendingWithdrawals: BuilderPendingWithdrawalsFromConsensus(bpw),
LatestBlockHash: hexutil.Encode(lbh[:]),
PayloadExpectedWithdrawals: WithdrawalsFromConsensus(pew),
}, nil
}

View File

@@ -1,11 +1,15 @@
package structs
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/ethereum/go-ethereum/common/hexutil"
)
@@ -355,3 +359,214 @@ func TestIndexedAttestation_ToConsensus(t *testing.T) {
_, err := a.ToConsensus()
require.ErrorContains(t, errNilValue.Error(), err)
}
func TestROExecutionPayloadBidFromConsensus(t *testing.T) {
t.Run("empty blobkzg commitments", func(t *testing.T) {
bid := &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
GasLimit: 100,
BuilderIndex: 7,
Slot: 9,
Value: 11,
ExecutionPayment: 22,
BlobKzgCommitments: [][]byte{},
}
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
require.NoError(t, err)
got := ROExecutionPayloadBidFromConsensus(roBid)
want := &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
BlockHash: hexutil.Encode(bid.BlockHash),
PrevRandao: hexutil.Encode(bid.PrevRandao),
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
GasLimit: "100",
BuilderIndex: "7",
Slot: "9",
Value: "11",
ExecutionPayment: "22",
BlobKzgCommitments: []string{},
}
assert.DeepEqual(t, want, got)
})
t.Run("default", func(t *testing.T) {
bid := &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
GasLimit: 100,
BuilderIndex: 7,
Slot: 9,
Value: 11,
ExecutionPayment: 22,
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x06}, 48)},
}
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
require.NoError(t, err)
var bkcs []string
for _, commitment := range roBid.BlobKzgCommitments() {
bkcs = append(bkcs, hexutil.Encode(commitment))
}
got := ROExecutionPayloadBidFromConsensus(roBid)
want := &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
BlockHash: hexutil.Encode(bid.BlockHash),
PrevRandao: hexutil.Encode(bid.PrevRandao),
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
GasLimit: "100",
BuilderIndex: "7",
Slot: "9",
Value: "11",
ExecutionPayment: "22",
BlobKzgCommitments: bkcs,
}
assert.DeepEqual(t, want, got)
})
}
func TestBuilderConversionsFromConsensus(t *testing.T) {
builder := &eth.Builder{
Pubkey: bytes.Repeat([]byte{0xAA}, 48),
Version: bytes.Repeat([]byte{0x01}, 4),
ExecutionAddress: bytes.Repeat([]byte{0xBB}, 20),
Balance: 42,
DepositEpoch: 3,
WithdrawableEpoch: 4,
}
wantBuilder := &Builder{
Pubkey: hexutil.Encode(builder.Pubkey),
Version: hexutil.Encode(builder.Version),
ExecutionAddress: hexutil.Encode(builder.ExecutionAddress),
Balance: "42",
DepositEpoch: "3",
WithdrawableEpoch: "4",
}
assert.DeepEqual(t, wantBuilder, BuilderFromConsensus(builder))
assert.DeepEqual(t, []*Builder{wantBuilder}, BuildersFromConsensus([]*eth.Builder{builder}))
}
func TestBuilderPendingPaymentConversionsFromConsensus(t *testing.T) {
withdrawal := &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x10}, 20),
Amount: 15,
BuilderIndex: 2,
}
payment := &eth.BuilderPendingPayment{
Weight: 5,
Withdrawal: withdrawal,
}
wantWithdrawal := &BuilderPendingWithdrawal{
FeeRecipient: hexutil.Encode(withdrawal.FeeRecipient),
Amount: "15",
BuilderIndex: "2",
}
wantPayment := &BuilderPendingPayment{
Weight: "5",
Withdrawal: wantWithdrawal,
}
assert.DeepEqual(t, wantPayment, BuilderPendingPaymentFromConsensus(payment))
assert.DeepEqual(t, []*BuilderPendingPayment{wantPayment}, BuilderPendingPaymentsFromConsensus([]*eth.BuilderPendingPayment{payment}))
assert.DeepEqual(t, wantWithdrawal, BuilderPendingWithdrawalFromConsensus(withdrawal))
assert.DeepEqual(t, []*BuilderPendingWithdrawal{wantWithdrawal}, BuilderPendingWithdrawalsFromConsensus([]*eth.BuilderPendingWithdrawal{withdrawal}))
}
func TestBeaconStateGloasFromConsensus(t *testing.T) {
st, err := util.NewBeaconStateGloas(func(state *eth.BeaconStateGloas) error {
state.GenesisTime = 123
state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
state.Slot = 5
state.ProposerLookahead = []uint64{1, 2}
state.LatestExecutionPayloadBid = &eth.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x12}, 32),
BlockHash: bytes.Repeat([]byte{0x13}, 32),
PrevRandao: bytes.Repeat([]byte{0x14}, 32),
FeeRecipient: bytes.Repeat([]byte{0x15}, 20),
GasLimit: 64,
BuilderIndex: 3,
Slot: 5,
Value: 99,
ExecutionPayment: 7,
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x16}, 48)},
}
state.Builders = []*eth.Builder{
{
Pubkey: bytes.Repeat([]byte{0x20}, 48),
Version: bytes.Repeat([]byte{0x21}, 4),
ExecutionAddress: bytes.Repeat([]byte{0x22}, 20),
Balance: 88,
DepositEpoch: 1,
WithdrawableEpoch: 2,
},
}
state.NextWithdrawalBuilderIndex = 9
state.ExecutionPayloadAvailability = []byte{0x01, 0x02}
state.BuilderPendingPayments = []*eth.BuilderPendingPayment{
{
Weight: 3,
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x23}, 20),
Amount: 4,
BuilderIndex: 5,
},
},
}
state.BuilderPendingWithdrawals = []*eth.BuilderPendingWithdrawal{
{
FeeRecipient: bytes.Repeat([]byte{0x24}, 20),
Amount: 6,
BuilderIndex: 7,
},
}
state.LatestBlockHash = bytes.Repeat([]byte{0x25}, 32)
state.PayloadExpectedWithdrawals = []*enginev1.Withdrawal{
{Index: 1, ValidatorIndex: 2, Address: bytes.Repeat([]byte{0x26}, 20), Amount: 10},
}
return nil
})
require.NoError(t, err)
got, err := BeaconStateGloasFromConsensus(st)
require.NoError(t, err)
require.Equal(t, "123", got.GenesisTime)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x10}, 32)), got.GenesisValidatorsRoot)
require.Equal(t, "5", got.Slot)
require.DeepEqual(t, []string{"1", "2"}, got.ProposerLookahead)
require.Equal(t, "9", got.NextWithdrawalBuilderIndex)
require.Equal(t, hexutil.Encode([]byte{0x01, 0x02}), got.ExecutionPayloadAvailability)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x25}, 32)), got.LatestBlockHash)
require.NotNil(t, got.LatestExecutionPayloadBid)
require.Equal(t, "64", got.LatestExecutionPayloadBid.GasLimit)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x11}, 32)), got.LatestExecutionPayloadBid.ParentBlockHash)
require.NotNil(t, got.Builders)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x20}, 48)), got.Builders[0].Pubkey)
require.Equal(t, "88", got.Builders[0].Balance)
require.Equal(t, "3", got.BuilderPendingPayments[0].Weight)
require.Equal(t, "4", got.BuilderPendingPayments[0].Withdrawal.Amount)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x23}, 20)), got.BuilderPendingPayments[0].Withdrawal.FeeRecipient)
require.Equal(t, "6", got.BuilderPendingWithdrawals[0].Amount)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x24}, 20)), got.BuilderPendingWithdrawals[0].FeeRecipient)
require.Equal(t, "1", got.PayloadExpectedWithdrawals[0].WithdrawalIndex)
require.Equal(t, "2", got.PayloadExpectedWithdrawals[0].ValidatorIndex)
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x26}, 20)), got.PayloadExpectedWithdrawals[0].ExecutionAddress)
require.Equal(t, "10", got.PayloadExpectedWithdrawals[0].Amount)
}

View File

@@ -285,6 +285,13 @@ type GetBlobsResponse struct {
Data []string `json:"data"` //blobs
}
type GetExecutionPayloadEnvelopeResponse struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data *SignedExecutionPayloadEnvelope `json:"data"`
}
type SSZQueryRequest struct {
Query string `json:"query"`
IncludeProof bool `json:"include_proof,omitempty"`

View File

@@ -112,3 +112,8 @@ type LightClientOptimisticUpdateEvent struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}
type PayloadEvent struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root"`
}

View File

@@ -63,6 +63,19 @@ type PeerCount struct {
Connected string `json:"connected"`
Disconnecting string `json:"disconnecting"`
}
type GetVersionV2Response struct {
Data *VersionV2 `json:"data"`
}
type VersionV2 struct {
BeaconNode *ClientVersionV1 `json:"beacon_node"`
ExecutionClient *ClientVersionV1 `json:"execution_client,omitempty"`
}
type ClientVersionV1 struct {
Code string `json:"code"`
Name string `json:"name"`
Version string `json:"version"`
Commit string `json:"commit"`
}
type GetVersionResponse struct {
Data *Version `json:"data"`

View File

@@ -74,6 +74,18 @@ type SyncCommitteeDuty struct {
ValidatorSyncCommitteeIndices []string `json:"validator_sync_committee_indices"`
}
type GetPTCDutiesResponse struct {
DependentRoot string `json:"dependent_root"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Data []*PTCDuty `json:"data"`
}
type PTCDuty struct {
Pubkey string `json:"pubkey"`
ValidatorIndex string `json:"validator_index"`
Slot string `json:"slot"`
}
// ProduceBlockV3Response is a wrapper json object for the returned block from the ProduceBlockV3 endpoint
type ProduceBlockV3Response struct {
Version string `json:"version"`

View File

@@ -262,3 +262,23 @@ type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}
type Builder struct {
Pubkey string `json:"pubkey"`
Version string `json:"version"`
ExecutionAddress string `json:"execution_address"`
Balance string `json:"balance"`
DepositEpoch string `json:"deposit_epoch"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type BuilderPendingPayment struct {
Weight string `json:"weight"`
Withdrawal *BuilderPendingWithdrawal `json:"withdrawal"`
}
type BuilderPendingWithdrawal struct {
FeeRecipient string `json:"fee_recipient"`
Amount string `json:"amount"`
BuilderIndex string `json:"builder_index"`
}

View File

@@ -221,3 +221,51 @@ type BeaconStateFulu struct {
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
ProposerLookahead []string `json:"proposer_lookahead"`
}
type BeaconStateGloas struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
ProposerLookahead []string `json:"proposer_lookahead"`
LatestExecutionPayloadBid *ExecutionPayloadBid `json:"latest_execution_payload_bid"`
Builders []*Builder `json:"builders"`
NextWithdrawalBuilderIndex string `json:"next_withdrawal_builder_index"`
ExecutionPayloadAvailability string `json:"execution_payload_availability"`
BuilderPendingPayments []*BuilderPendingPayment `json:"builder_pending_payments"`
BuilderPendingWithdrawals []*BuilderPendingWithdrawal `json:"builder_pending_withdrawals"`
LatestBlockHash string `json:"latest_block_hash"`
PayloadExpectedWithdrawals []*Withdrawal `json:"payload_expected_withdrawals"`
}

View File

@@ -10,6 +10,7 @@ go_library(
"error.go",
"execution_engine.go",
"forkchoice_update_execution.go",
"gloas.go",
"head.go",
"head_sync_committee_info.go",
"init_sync_process_block.go",
@@ -27,6 +28,8 @@ go_library(
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"receive_execution_payload_envelope.go",
"receive_payload_attestation_message.go",
"service.go",
"setup_forkchoice.go",
"tracked_proposer.go",
@@ -50,6 +53,7 @@ go_library(
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
@@ -85,6 +89,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
@@ -118,6 +123,7 @@ go_test(
"error_test.go",
"execution_engine_test.go",
"forkchoice_update_execution_test.go",
"gloas_test.go",
"head_sync_committee_info_test.go",
"head_test.go",
"init_sync_process_block_test.go",
@@ -130,6 +136,7 @@ go_test(
"process_block_test.go",
"receive_attestation_test.go",
"receive_block_test.go",
"receive_payload_attestation_message_test.go",
"service_norace_test.go",
"service_test.go",
"setup_forkchoice_test.go",
@@ -147,6 +154,7 @@ go_test(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
@@ -178,6 +186,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",

View File

@@ -18,6 +18,7 @@ import (
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
@@ -44,14 +45,18 @@ type ForkchoiceFetcher interface {
SetForkChoiceGenesisTime(time.Time)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HasFullNode([32]byte) bool
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
InsertPayload(interfaces.ROExecutionPayloadEnvelope) error
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
DependentRoot(primitives.Epoch) ([32]byte, error)
CanonicalNodeAtSlot(primitives.Slot) ([32]byte, bool)
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -113,6 +118,7 @@ type FinalizationFetcher interface {
FinalizedBlockHash() [32]byte
InForkchoice([32]byte) bool
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
ParentPayloadReady(interfaces.ReadOnlyBeaconBlock) bool
}
// OptimisticModeFetcher retrieves information about optimistic status of the node.
@@ -402,6 +408,32 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// ParentPayloadReady returns true if the block's parent payload is available
// in forkchoice. For pre-Gloas blocks or blocks building on empty, this always
// returns true. For blocks building on full, it checks that the full node
// exists.
func (s *Service) ParentPayloadReady(blk interfaces.ReadOnlyBeaconBlock) bool {
if blk.Version() < version.Gloas {
return true
}
parentRoot := blk.ParentRoot()
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
if err != nil {
return false
}
bid, err := blk.Body().SignedExecutionPayloadBid()
if err != nil || bid == nil || bid.Message == nil {
return false
}
parentBlockHash := [32]byte(bid.Message.ParentBlockHash)
if parentBlockHash != blockHash {
return true // builds on empty, no full node needed
}
return s.cfg.ForkChoiceStore.HasFullNode(parentRoot)
}
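
A minimal standalone sketch of the three-way decision above, with the forkchoice store modeled as two maps; every type and name here is illustrative, not Prysm's:

```go
package main

import "fmt"

type store struct {
	blockHash map[[32]byte][32]byte // consensus node -> execution block hash
	fullNode  map[[32]byte]bool     // consensus node -> payload (full node) present
}

// parentPayloadReady mirrors the three outcomes: unknown parent -> false,
// builds on empty (bid parent hash differs from the parent's block hash) -> true,
// builds on full -> true only if the parent's full node exists.
func parentPayloadReady(s *store, parentRoot, bidParentHash [32]byte) bool {
	hash, ok := s.blockHash[parentRoot]
	if !ok {
		return false // parent not in forkchoice
	}
	if bidParentHash != hash {
		return true // builds on empty, no full node needed
	}
	return s.fullNode[parentRoot]
}

func main() {
	parent := [32]byte{1}
	hash := [32]byte{10}
	s := &store{
		blockHash: map[[32]byte][32]byte{parent: hash},
		fullNode:  map[[32]byte]bool{},
	}
	fmt.Println(parentPayloadReady(s, parent, [32]byte{99})) // true: builds on empty
	fmt.Println(parentPayloadReady(s, parent, hash))         // false: builds on full, payload missing
	s.fullNode[parent] = true
	fmt.Println(parentPayloadReady(s, parent, hash)) // true: builds on full with payload
}
```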
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/version"
@@ -41,6 +42,20 @@ func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}
// HighestReceivedBlockRoot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockRoot() [32]byte {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
}
// HasFullNode returns the corresponding value from forkchoice
func (s *Service) HasFullNode(root [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasFullNode(root)
}
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
s.cfg.ForkChoiceStore.RLock()
@@ -55,6 +70,13 @@ func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block co
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}
// InsertPayload is a wrapper for payload insertion which is self locked
func (s *Service) InsertPayload(pe interfaces.ROExecutionPayloadEnvelope) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertPayload(pe)
}
// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
s.cfg.ForkChoiceStore.RLock()
@@ -128,6 +150,13 @@ func (s *Service) hashForGenesisBlock(ctx context.Context, root [32]byte) ([]byt
return bytesutil.SafeCopyBytes(header.BlockHash()), nil
}
// CanonicalNodeAtSlot wraps the corresponding method in forkchoice
func (s *Service) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.CanonicalNodeAtSlot(slot)
}
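
These wrappers all follow the same self-locking pattern: callers never touch the store's lock directly; reads take the read lock and mutations take the write lock. A generic sketch of the pattern, assuming nothing beyond sync.RWMutex:

```go
package main

import (
	"fmt"
	"sync"
)

// store is a stand-in for the forkchoice store: every exported wrapper
// acquires the appropriate lock itself, so call sites stay lock-free.
type store struct {
	mu    sync.RWMutex
	nodes map[[32]byte]bool
}

// HasNode takes the read lock: many readers may proceed concurrently.
func (s *store) HasNode(root [32]byte) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.nodes[root]
}

// InsertNode takes the write lock: mutation is exclusive.
func (s *store) InsertNode(root [32]byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.nodes[root] = true
}

func main() {
	s := &store{nodes: map[[32]byte]bool{}}
	s.InsertNode([32]byte{1})
	fmt.Println(s.HasNode([32]byte{1})) // true
}
```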
// DependentRoot wraps the corresponding method in forkchoice
func (s *Service) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
s.cfg.ForkChoiceStore.RLock()

View File

@@ -620,6 +620,121 @@ func TestService_IsFinalized(t *testing.T) {
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}
func TestParentPayloadReady(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 0
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, tr := minimalTestService(t)
ctx := t.Context()
fcs := tr.fcs
parentRoot := [32]byte{1}
parentBlockHash := [32]byte{10}
zeroHash := params.BeaconConfig().ZeroHash
// Insert parent node into forkchoice.
st, parentROBlock, err := prepareGloasForkchoiceState(ctx, 1, parentRoot, zeroHash, parentBlockHash, zeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, parentROBlock))
t.Run("pre-Gloas always true", func(t *testing.T) {
blk := util.HydrateSignedBeaconBlockDeneb(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{ParentRoot: parentRoot[:]},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
})
t.Run("parent not in forkchoice", func(t *testing.T) {
unknownParent := [32]byte{99}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: unknownParent[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
})
t.Run("builds on empty", func(t *testing.T) {
differentHash := [32]byte{99}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: differentHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
})
t.Run("builds on full without payload", func(t *testing.T) {
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
})
t.Run("builds on full with payload", func(t *testing.T) {
pe, err := blocks.WrappedROExecutionPayloadEnvelope(&ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: parentRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{},
})
require.NoError(t, err)
require.NoError(t, fcs.InsertPayload(pe))
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
})
}
func Test_hashForGenesisRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := t.Context()

View File

@@ -101,11 +101,16 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
// this call is guaranteed to have the `headRoot` with its payload in forkchoice.
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
// TODO: Gloas, we should not include the head root in this call
if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
invalidRoots = append([][32]byte{headRoot}, invalidRoots...)
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
@@ -228,6 +233,9 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
if stVersion < version.Bellatrix {
return true, nil
}
if blk.Version() >= version.Gloas {
return false, nil
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
if err != nil {
@@ -290,10 +298,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
if err != nil {
return err
}

View File

@@ -429,9 +429,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
// Insert Attestations to D, F and G so that they have higher weight than D
// Ensure G is head
fcs.ProcessAttestation(ctx, []uint64{0}, brd, 1)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, 1)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, 1)
fcs.ProcessAttestation(ctx, []uint64{0}, brd, params.BeaconConfig().SlotsPerEpoch, true)
fcs.ProcessAttestation(ctx, []uint64{1}, brf, params.BeaconConfig().SlotsPerEpoch, true)
fcs.ProcessAttestation(ctx, []uint64{2}, brg, params.BeaconConfig().SlotsPerEpoch, true)
fcs.SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
jc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: bra}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, jc))
@@ -465,9 +465,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
// Ensure F and G's full nodes were removed but their empty (consensus) nodes remain, as does E
require.Equal(t, true, fcs.HasNode(brf))
require.Equal(t, true, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
@@ -703,14 +703,13 @@ func Test_reportInvalidBlock(t *testing.T) {
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'c'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
require.Equal(t, 3, len(invalidRoots))
require.Equal(t, 2, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
@@ -785,7 +784,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
}
func Test_GetPayloadAttributeV3(t *testing.T) {
var testCases = []struct {
testCases := []struct {
name string
st bstate.BeaconState
}{

View File

@@ -56,7 +56,7 @@ type fcuConfig struct {
// sendFCU handles the logic to notify the engine of a forkchoice update
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
@@ -64,7 +64,8 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
fcuArgs, err := s.getFCUArgs(cfg)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}

View File

@@ -0,0 +1,45 @@
package blockchain
import (
"github.com/OffchainLabs/prysm/v7/config/params"
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// getLookupParentRoot returns the root that serves as the key to generate the parent state for the passed beacon block.
// If the block is pre-Gloas or builds on empty, this is the block's parent root; if it builds on full, it is the
// parent hash.
// The caller of this function must not hold a lock on forkchoice.
func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, error) {
bl := b.Block()
parentRoot := bl.ParentRoot()
if b.Version() < version.Gloas {
return parentRoot, nil
}
parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
if err != nil {
return [32]byte{}, errors.Wrap(err, "failed to get slot for parent root")
}
if slots.ToEpoch(parentSlot) < params.BeaconConfig().GloasForkEpoch {
return parentRoot, nil
}
blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
if err != nil {
return [32]byte{}, errors.Wrap(err, "failed to get block hash for parent root")
}
bid, err := bl.Body().SignedExecutionPayloadBid()
if err != nil {
return [32]byte{}, errors.Wrap(err, "failed to get signed execution payload bid from block body")
}
if bid == nil || bid.Message == nil || len(bid.Message.ParentBlockHash) != 32 {
return [32]byte{}, errors.New("invalid signed execution payload bid message")
}
parentHash := [32]byte(bid.Message.ParentBlockHash)
if blockHash == parentHash {
return parentHash, nil
}
return parentRoot, nil
}
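
The pre-fork guard and the hash comparison together pick the lookup key. A compact sketch of just that decision, with illustrative constants (mainnet SlotsPerEpoch is 32; the fork epoch here is arbitrary):

```go
package main

import "fmt"

const (
	slotsPerEpoch  = 32
	gloasForkEpoch = 2
)

// lookupKeyIsParentRoot returns true when the parent-root key must be used:
// either the parent predates the fork, or the bid does not build on the
// parent's execution block (builds on empty). Otherwise the key is the
// parent hash (builds on full).
func lookupKeyIsParentRoot(parentSlot uint64, parentBlockHash, bidParentHash [32]byte) bool {
	if parentSlot/slotsPerEpoch < gloasForkEpoch {
		return true
	}
	return bidParentHash != parentBlockHash
}

func main() {
	h := [32]byte{10}
	fmt.Println(lookupKeyIsParentRoot(31, h, h))            // true: parent is pre-fork
	fmt.Println(lookupKeyIsParentRoot(65, h, [32]byte{99})) // true: builds on empty
	fmt.Println(lookupKeyIsParentRoot(65, h, h))            // false: key is the parent hash
}
```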

View File

@@ -0,0 +1,648 @@
package blockchain
import (
"context"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func prepareGloasForkchoiceState(
_ context.Context,
slot primitives.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
blockHash [32]byte,
parentBlockHash [32]byte,
justifiedEpoch primitives.Epoch,
finalizedEpoch primitives.Epoch,
) (state.BeaconState, blocks.ROBlock, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: justifiedEpoch,
}
finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: finalizedEpoch,
}
builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
for i := range builderPendingPayments {
builderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}
base := &ethpb.BeaconStateGloas{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentJustifiedCheckpoint: justifiedCheckpoint,
FinalizedCheckpoint: finalizedCheckpoint,
LatestBlockHeader: blockHeader,
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
},
Builders: make([]*ethpb.Builder, 0),
BuilderPendingPayments: builderPendingPayments,
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]uint64, 64),
}
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
if err != nil {
return nil, blocks.ROBlock{}, err
}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{
SignedExecutionPayloadBid: bid,
},
},
})
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, blocks.ROBlock{}, err
}
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
}
func testGloasState(t *testing.T, slot primitives.Slot, parentRoot [32]byte, blockHash [32]byte) (*ethpb.BeaconStateGloas, *ethpb.SignedBeaconBlockGloas) {
t.Helper()
builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
for i := range builderPendingPayments {
builderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
}
}
base := &ethpb.BeaconStateGloas{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
StateRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
LatestBlockHeader: &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
},
Builders: make([]*ethpb.Builder, 0),
BuilderPendingPayments: builderPendingPayments,
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]uint64, 64),
}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: make([]byte, 32),
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
},
})
return base, blk
}
func testSignedEnvelope(t *testing.T, blockRoot [32]byte, slot primitives.Slot, blockHash []byte) *ethpb.SignedExecutionPayloadEnvelope {
t.Helper()
return &ethpb.SignedExecutionPayloadEnvelope{
Message: &ethpb.ExecutionPayloadEnvelope{
Payload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: blockHash,
Transactions: [][]byte{},
Withdrawals: []*enginev1.Withdrawal{},
},
ExecutionRequests: &enginev1.ExecutionRequests{},
BuilderIndex: 0,
BeaconBlockRoot: blockRoot[:],
Slot: slot,
StateRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
}
}
func setupGloasService(t *testing.T, engineClient *mockExecution.EngineClient) (*Service, *testServiceRequirements) {
t.Helper()
return minimalTestService(t,
WithPayloadIDCache(cache.NewPayloadIDCache()),
WithExecutionEngineCaller(engineClient),
)
}
func insertGloasBlock(t *testing.T, s *Service, base *ethpb.BeaconStateGloas, blk *ethpb.SignedBeaconBlockGloas, blockRoot [32]byte) {
t.Helper()
ctx := t.Context()
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
signed, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, signed))
require.NoError(t, s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: blockRoot[:], Slot: blk.Block.Slot}))
require.NoError(t, s.cfg.StateGen.SaveState(ctx, blockRoot, st))
require.NoError(t, s.InsertNode(ctx, st, roblock))
}
func TestGetPayloadEnvelopePrestate_UnknownRoot(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
unknownRoot := bytesutil.ToBytes32([]byte("unknown"))
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: unknownRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{},
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
_, err = s.getPayloadEnvelopePrestate(ctx, envelope)
require.ErrorContains(t, "not found in forkchoice", err)
}
func TestGetPayloadEnvelopePrestate_OK(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, blk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, blk, blockRoot)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: blockRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{},
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
st, err := s.getPayloadEnvelopePrestate(ctx, envelope)
require.NoError(t, err)
require.Equal(t, primitives.Slot(1), st.Slot())
}
func TestNotifyNewEnvelope_Valid(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, parentRoot, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: blockRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:]},
ExecutionRequests: &enginev1.ExecutionRequests{},
Slot: 1,
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
isValid, err := s.notifyNewEnvelope(ctx, st, envelope)
require.NoError(t, err)
require.Equal(t, true, isValid)
}
func TestNotifyNewEnvelope_Syncing(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{
ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus,
})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, parentRoot, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: blockRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:]},
ExecutionRequests: &enginev1.ExecutionRequests{},
Slot: 1,
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
isValid, err := s.notifyNewEnvelope(ctx, st, envelope)
require.NoError(t, err)
require.Equal(t, false, isValid)
}
func TestNotifyNewEnvelope_Invalid(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{
ErrNewPayload: execution.ErrInvalidPayloadStatus,
})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, parentRoot, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: blockRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:]},
ExecutionRequests: &enginev1.ExecutionRequests{},
Slot: 1,
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
_, err = s.notifyNewEnvelope(ctx, st, envelope)
require.Equal(t, true, IsInvalidBlock(err))
}
func TestNotifyForkchoiceUpdateGloas_Valid(t *testing.T) {
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
s, _ := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
ctx := t.Context()
blockHash := bytesutil.ToBytes32([]byte("hash1"))
attr := payloadattribute.EmptyWithVersion(version.Gloas)
retPid, err := s.notifyForkchoiceUpdateGloas(ctx, blockHash, attr)
require.NoError(t, err)
require.DeepEqual(t, pid, retPid)
}
func TestNotifyForkchoiceUpdateGloas_Syncing(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{
ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus,
})
ctx := t.Context()
blockHash := bytesutil.ToBytes32([]byte("hash1"))
_, err := s.notifyForkchoiceUpdateGloas(ctx, blockHash, nil)
require.NoError(t, err)
}
func TestNotifyForkchoiceUpdateGloas_Invalid(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{
ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus,
})
ctx := t.Context()
blockHash := bytesutil.ToBytes32([]byte("hash1"))
_, err := s.notifyForkchoiceUpdateGloas(ctx, blockHash, nil)
require.Equal(t, true, IsInvalidBlock(err))
}
func TestNotifyForkchoiceUpdateGloas_NilAttributes(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockHash := bytesutil.ToBytes32([]byte("hash1"))
_, err := s.notifyForkchoiceUpdateGloas(ctx, blockHash, nil)
require.NoError(t, err)
}
func TestSavePostPayload(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
protoEnv := testSignedEnvelope(t, blockRoot, 1, blockHash[:])
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(protoEnv)
require.NoError(t, err)
require.NoError(t, s.savePostPayload(ctx, signed, st))
// Verify the envelope was saved in the DB.
require.Equal(t, true, s.cfg.BeaconDB.HasExecutionPayloadEnvelope(ctx, blockRoot))
}
func TestValidateExecutionOnEnvelope_Valid(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, parentRoot, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: blockRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:], ParentHash: make([]byte, 32)},
ExecutionRequests: &enginev1.ExecutionRequests{},
Slot: 1,
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
isValid, err := s.validateExecutionOnEnvelope(ctx, st, envelope)
require.NoError(t, err)
require.Equal(t, true, isValid)
}
func TestPostPayloadHeadUpdate_NotHead(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
root := bytesutil.ToBytes32([]byte("root1"))
headRoot := bytesutil.ToBytes32([]byte("different"))
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: root[:],
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:]},
Slot: 1,
}
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, headRoot[:]))
}
func TestGetLookupParentRoot_PreGloas(t *testing.T) {
service, _ := minimalTestService(t)
parentRoot := [32]byte{1}
blk := util.HydrateSignedBeaconBlockDeneb(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
ParentRoot: parentRoot[:],
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
got, err := service.getLookupParentRoot(roblock)
require.NoError(t, err)
require.Equal(t, parentRoot, got)
}
func TestGetLookupParentRoot_GloasBuildsOnEmpty(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 0
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, req := minimalTestService(t)
ctx := t.Context()
parentRoot := [32]byte{1}
parentBlockHash := [32]byte{20}
parentNodeBlockHash := [32]byte{99} // Different from parentBlockHash => builds on empty
// Insert a Gloas node for the parent so BlockHash works.
st, parentROBlock, err := prepareGloasForkchoiceState(ctx, 1, parentRoot, params.BeaconConfig().ZeroHash, parentNodeBlockHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, req.fcs.InsertNode(ctx, st, parentROBlock))
blockHash := [32]byte{10}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{
SignedExecutionPayloadBid: bid,
},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
got, err := service.getLookupParentRoot(roblock)
require.NoError(t, err)
// parentBlockHash != parentNodeBlockHash, so it builds on empty => returns parentRoot
require.Equal(t, parentRoot, got)
}
func TestGetLookupParentRoot_GloasBuildsOnFull(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 0
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, req := minimalTestService(t)
ctx := t.Context()
parentRoot := [32]byte{1}
parentNodeBlockHash := [32]byte{10}
// Insert a Gloas node for the parent so BlockHash works.
st, parentROBlock, err := prepareGloasForkchoiceState(ctx, 1, parentRoot, params.BeaconConfig().ZeroHash, parentNodeBlockHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, req.fcs.InsertNode(ctx, st, parentROBlock))
// Set parentBlockHash in the bid to match the parent's blockHash.
blockHash := [32]byte{20}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: parentNodeBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{
SignedExecutionPayloadBid: bid,
},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
got, err := service.getLookupParentRoot(roblock)
require.NoError(t, err)
// parentBlockHash == parentNodeBlockHash, so it builds on full => returns parentBlockHash
require.Equal(t, parentNodeBlockHash, got)
}
func TestGetLookupParentRoot_GloasParentPreForkEpoch(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.GloasForkEpoch = 2
cfg.InitializeForkSchedule()
params.OverrideBeaconConfig(cfg)
service, req := minimalTestService(t)
ctx := t.Context()
parentRoot := [32]byte{1}
parentNodeBlockHash := [32]byte{10}
parentSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
require.NoError(t, err)
parentSlot = parentSlot - 1
st, parentROBlock, err := prepareGloasForkchoiceState(
ctx,
parentSlot,
parentRoot,
params.BeaconConfig().ZeroHash,
parentNodeBlockHash,
params.BeaconConfig().ZeroHash,
0,
0,
)
require.NoError(t, err)
require.NoError(t, req.fcs.InsertNode(ctx, st, parentROBlock))
blockHash := [32]byte{20}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: parentNodeBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: parentSlot + 1,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{
SignedExecutionPayloadBid: bid,
},
},
})
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
got, err := service.getLookupParentRoot(roblock)
require.NoError(t, err)
// Parent slot is pre-fork, so always return parentRoot.
require.Equal(t, parentRoot, got)
}
func TestLateBlockTasks_GloasFCU(t *testing.T) {
logHook := logTest.NewGlobal()
resetCfg := features.InitWithReset(&features.Flags{
PrepareAllPayloads: true,
})
defer resetCfg()
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
blockHash := bytesutil.ToBytes32([]byte("hash1"))
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
base.LatestBlockHash = blockHash[:]
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
require.NoError(t, err)
headRoot := bytesutil.ToBytes32([]byte("headroot"))
service.head = &head{
root: headRoot,
state: st,
slot: 1,
}
// Set genesis time so CurrentSlot > HeadSlot, triggering late block logic.
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
service.SetForkChoiceGenesisTime(service.genesisTime)
service.lateBlockTasks(tr.ctx)
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
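
The test nudges genesis two slot-lengths into the past so the derived current slot (2) exceeds the head slot (1), which triggers the late-block path. A sketch of that slot derivation, assuming 12-second slots:

```go
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // mainnet value, illustrative here

// currentSlot derives the slot from genesis time the way the test above
// arranges it: a genesis two slot-lengths in the past yields slot 2.
func currentSlot(genesis time.Time) uint64 {
	if time.Now().Before(genesis) {
		return 0
	}
	return uint64(time.Since(genesis) / (secondsPerSlot * time.Second))
}

func main() {
	genesis := time.Now().Add(-2 * secondsPerSlot * time.Second)
	fmt.Println(currentSlot(genesis)) // 2
}
```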

View File

@@ -170,7 +170,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// Forward an event capturing a new chain head over a common event feed
// done in a goroutine to avoid blocking the critical runtime main routine.
go func() {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot[:], newHeadRoot[:]); err != nil {
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newStateRoot[:], newHeadRoot[:]); err != nil {
log.WithError(err).Error("Could not notify event feed of new chain head")
}
}()
@@ -322,7 +322,6 @@ func (s *Service) hasHeadState() bool {
func (s *Service) notifyNewHeadEvent(
ctx context.Context,
newHeadSlot primitives.Slot,
newHeadState state.BeaconState,
newHeadStateRoot,
newHeadRoot []byte,
) error {

View File

@@ -152,7 +152,6 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
func Test_notifyNewHeadEvent(t *testing.T) {
t.Run("genesis_state_root", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
@@ -165,7 +164,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
st, blk, err = prepareForkchoiceState(t.Context(), 1, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -202,7 +201,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
st, blk, err = prepareForkchoiceState(t.Context(), 0, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))
@@ -220,7 +219,6 @@ func Test_notifyNewHeadEvent(t *testing.T) {
require.DeepSSZEqual(t, wanted, eventHead)
})
t.Run("epoch transition", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
@@ -234,7 +232,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadSlot := params.BeaconConfig().SlotsPerEpoch
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), newHeadSlot, bState, newHeadStateRoot[:], newHeadRoot[:]))
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), newHeadSlot, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))

View File

@@ -110,7 +110,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
ckzgCells := make([]ckzg4844.Cell, len(cells))
for i := range cells {
copy(ckzgCells[i][:], cells[i][:])
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
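
The replaced line swaps an element-wise copy for a direct array type conversion, which is legal in Go whenever the two named array types share an underlying type. A self-contained illustration using hypothetical 4-byte stand-ins for the KZG cell types:

```go
package main

import "fmt"

// Cell stands in for ckzg4844.Cell; both types are fixed-size byte arrays
// of the same length, which is what makes the direct conversion legal.
type (
	Bytes4 [4]byte
	Cell   [4]byte
)

func main() {
	src := Bytes4{1, 2, 3, 4}

	// Old style: element-wise copy through slices of the arrays.
	var viaCopy Cell
	copy(viaCopy[:], src[:])

	// New style: a single type conversion; Go still copies the array value,
	// but the intent (re-typing, not transforming) is explicit.
	viaConv := Cell(src)

	fmt.Println(viaCopy == viaConv) // true
}
```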

View File

@@ -5,11 +5,11 @@ import (
"fmt"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/config/params"
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -40,7 +40,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.Bellatrix {
if b.Version() >= version.Bellatrix && b.Version() < version.Gloas {
p, err := b.Body().Execution()
if err != nil {
return err
@@ -56,7 +56,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
if b.Version() >= version.Deneb && b.Version() < version.Gloas {
kzgs, err := b.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
@@ -64,7 +64,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("kzgCommitmentCount", len(kzgs))
}
}
if b.Version() >= version.Electra {
if b.Version() >= version.Electra && b.Version() < version.Gloas {
eReqs, err := b.Body().ExecutionRequests()
if err != nil {
log.WithError(err).Error("Failed to get execution requests")
@@ -80,6 +80,20 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
}
}
}
if b.Version() >= version.Gloas {
signedBid, err := b.Body().SignedExecutionPayloadBid()
if err != nil {
log.WithError(err).Error("Failed to get signed execution payload bid")
} else {
bid := signedBid.Message
log = log.WithFields(logrus.Fields{
"blobKzgCommitmentCount": len(bid.BlobKzgCommitments),
"payloadHash": fmt.Sprintf("%#x", bytesutil.Trunc(bid.BlockHash)),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(bid.ParentBlockHash)),
"builderIndex": bid.BuilderIndex,
})
}
}
log.Info("Finished applying state transition")
return nil
}
@@ -87,46 +101,62 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return err
return errors.Wrap(err, "failed to get slot start time")
}
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
parentRoot := block.ParentRoot()
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
sinceSlotStartTime := prysmTime.Now().Sub(startTime)
lessFields := logrus.Fields{
"slot": block.Slot(),
"block": blkRoot,
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"epoch": slots.ToEpoch(block.Slot()),
"sinceSlotStartTime": sinceSlotStartTime,
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": blkRoot,
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": sinceSlotStartTime,
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
}
if block.Version() < version.Gloas {
moreFields["dataAvailabilityWaitedTime"] = daWaitedTime
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
log.WithError(err).Error("Failed to get signed execution payload bid for logging")
} else {
moreFields["blockHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.BlockHash))
moreFields["parentHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.ParentBlockHash))
moreFields["builderIndex"] = signedBid.Message.BuilderIndex
}
}
level := logs.PackageVerbosity("beacon-chain/blockchain")
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
return nil
}
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
return nil
}
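
The new flow emits a single detailed line at debug verbosity, and otherwise a terse user-facing line plus a detailed ephemeral line. A sketch of that shape using plain logrus; the target-field constants and verbosity lookup below merely stand in for the io/logs helpers and are assumptions, not the real API:

```go
package main

import "github.com/sirupsen/logrus"

// Illustrative stand-ins for the io/logs helpers used above.
const (
	logTargetField     = "target"
	logTargetUser      = "user"
	logTargetEphemeral = "ephemeral"
)

func packageVerbosity() logrus.Level { return logrus.InfoLevel }

func main() {
	less := logrus.Fields{"slot": 42}
	more := logrus.Fields{"slot": 42, "parentRoot": "0xabcd...", "version": "gloas"}

	// At debug verbosity one detailed line suffices; otherwise emit a short
	// line for the user and a detailed ephemeral line side by side.
	if packageVerbosity() >= logrus.DebugLevel {
		logrus.WithFields(more).Info("Synced new block")
		return
	}
	logrus.WithFields(less).WithField(logTargetField, logTargetUser).Info("Synced new block")
	logrus.WithFields(more).WithField(logTargetField, logTargetEphemeral).Info("Synced new block")
}
```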
// logs payload-related data every slot.
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
if block.Version() < version.Bellatrix || block.Version() >= version.Gloas {
return nil
}
payload, err := block.Body().Execution()

View File

@@ -96,7 +96,11 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// We assume trusted attestation in this function has verified signature.
// Update forkchoice store with the new attestation for updating weight.
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
attData := a.GetData()
payloadStatus := true
if attData.Target.Epoch >= params.BeaconConfig().GloasForkEpoch {
payloadStatus = attData.CommitteeIndex == 1
}
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(attData.BeaconBlockRoot), attData.Slot, payloadStatus)
return nil
}
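
Read off this hunk alone, post-Gloas attestations appear to carry the payload vote in the committee index field, with index 1 meaning the payload was present; pre-Gloas targets default to true. A sketch of that gate (the index-1 encoding is an assumption taken from this diff):

```go
package main

import "fmt"

const gloasForkEpoch = 2 // illustrative fork epoch

// payloadStatusFromAttestation mirrors the branch above: pre-Gloas targets
// always vote "payload present"; post-Gloas the committee index carries the
// vote, with index 1 meaning present.
func payloadStatusFromAttestation(targetEpoch, committeeIndex uint64) bool {
	if targetEpoch < gloasForkEpoch {
		return true
	}
	return committeeIndex == 1
}

func main() {
	fmt.Println(payloadStatusFromAttestation(1, 0)) // true: pre-Gloas
	fmt.Println(payloadStatusFromAttestation(3, 1)) // true: payload present
	fmt.Println(payloadStatusFromAttestation(3, 0)) // false: payload absent
}
```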

View File

@@ -1,12 +1,14 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -64,7 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
return invalidBlock{error: err}
}
startTime := time.Now()
fcuArgs := &fcuConfig{}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
@@ -83,6 +84,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
if err := s.handleBlockPayloadAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
return errors.Wrap(err, "could not handle block's payload attestations")
}
s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
if cfg.isValidPayload {
@@ -102,7 +106,14 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
s.sendFCU(cfg, fcuArgs)
if cfg.roblock.Version() < version.Gloas {
s.sendFCU(cfg)
} else if s.isNewHead(cfg.headRoot) {
if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(ctx, cfg.postState, cfg.roblock)
}
// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
@@ -232,7 +243,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
// this call does not have the root in forkchoice yet.
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -304,7 +316,18 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
slot := block.Slot()
if blockVersion >= version.Fulu {
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}
if len(kzgCommitments) == 0 {
return nil
}
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), slot); err != nil {
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
}
@@ -366,6 +389,46 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
return nil
}
// refreshCaches updates the next slot state cache and epoch boundary caches.
// Before Fulu this is done synchronously; after Fulu it is deferred to a goroutine.
func (s *Service) refreshCaches(ctx context.Context, currentSlot primitives.Slot, headRoot [32]byte, headState state.BeaconState, accessRoot [32]byte) {
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if lastState.Version() < version.Fulu {
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
} else {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
}()
}
}
// updateCachesAndEpochBoundary updates the next slot state cache and handles
// epoch boundary processing. If the lastRoot matches accessRoot, the cached
// last state is reused; otherwise, the head state is advanced instead.
func (s *Service) updateCachesAndEpochBoundary(ctx context.Context, currentSlot primitives.Slot, headState state.BeaconState, accessRoot [32]byte, lastRoot []byte, lastState state.BeaconState) {
if bytes.Equal(lastRoot, accessRoot[:]) {
// Happy path: the last advanced state is head, so we keep it.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
} else {
// The last advanced state was not head; do not advance it, advance the head state instead.
headState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, accessRoot[:], headState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, accessRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}
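
The pattern here: run the cache update inline before Fulu, but after Fulu hand it to a goroutine bounded by a slot deadline so block processing is not held up. A stripped-down sketch with an illustrative 4-second deadline standing in for slotDeadline:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// refresh mirrors the branch above: pre-fork the update runs inline on the
// caller's context; post-fork it runs in a goroutine with its own deadline.
func refresh(parent context.Context, postFork bool, update func(context.Context)) {
	if !postFork {
		update(parent)
		return
	}
	go func() {
		// Illustrative stand-in for the service's slotDeadline.
		ctx, cancel := context.WithTimeout(parent, 4*time.Second)
		defer cancel()
		update(ctx)
	}()
}

func main() {
	done := make(chan struct{})
	refresh(context.Background(), true, func(ctx context.Context) {
		_, hasDeadline := ctx.Deadline()
		fmt.Println("async cache update, bounded:", hasDeadline) // true
		close(done)
	})
	<-done
}
```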
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
@@ -401,7 +464,11 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
}
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
payloadStatus := true
if a.GetData().Target.Epoch >= params.BeaconConfig().GloasForkEpoch {
payloadStatus = a.GetData().CommitteeIndex == 1
}
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Slot, payloadStatus)
} else if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.AttestationCache.Add(a); err != nil {
return err
@@ -413,6 +480,36 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
return nil
}
// handleBlockPayloadAttestations feeds payload attestations included in a Gloas block into forkchoice.
func (s *Service) handleBlockPayloadAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
if blk.Version() < version.Gloas {
return nil
}
atts, err := blk.Body().PayloadAttestations()
if err != nil {
return err
}
if len(atts) == 0 {
return nil
}
committee, err := gloas.PayloadCommittee(ctx, st, blk.Slot()-1)
if err != nil {
return err
}
for _, att := range atts {
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
if !s.cfg.ForkChoiceStore.HasNode(root) {
continue
}
for i := range committee {
if att.AggregationBits.BitAt(uint64(i)) {
s.cfg.ForkChoiceStore.SetPTCVote(root, uint64(i), att.Data.PayloadPresent, att.Data.BlobDataAvailable)
}
}
}
return nil
}
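
Each payload attestation's aggregation bits are positional over the payload-timeliness committee: bit i set means member i voted. A sketch of that mapping using the go-bitfield package imported above (assuming it exposes the same Bitlist API as the upstream prysmaticlabs module):

```go
package main

import (
	"fmt"

	bitfield "github.com/OffchainLabs/go-bitfield"
)

func main() {
	committee := []uint64{11, 22, 33, 44} // validator indices, illustrative
	bits := bitfield.NewBitlist(uint64(len(committee)))
	bits.SetBitAt(0, true)
	bits.SetBitAt(2, true)

	// Only members whose bit is set contribute a PTC vote.
	for i, val := range committee {
		if bits.BitAt(uint64(i)) {
			fmt.Printf("PTC member %d (validator %d) voted\n", i, val)
		}
	}
}
```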
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
// This function requires a write lock on forkchoice.
@@ -543,6 +640,9 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil
}
if blk.Block().Version() >= version.Gloas {
return nil
}
// Skip validation if block has an empty payload.
payload, err := blk.Block().Body().Execution()
@@ -583,11 +683,22 @@ func (s *Service) runLateBlockTasks() {
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
cfg := params.BeaconConfig()
attDueBPS := cfg.AttestationDueBPS
if slots.ToEpoch(s.CurrentSlot()) >= cfg.GloasForkEpoch {
attDueBPS = cfg.AttestationDueBPSGloas
}
attThreshold := cfg.SlotComponentDuration(attDueBPS)
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, attThreshold, cfg.SecondsPerSlot)
for {
select {
case <-ticker.C():
case slot := <-ticker.C():
if attDueBPS != cfg.AttestationDueBPSGloas && slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
ticker.Done()
attDueBPS = cfg.AttestationDueBPSGloas
attThreshold = cfg.SlotComponentDuration(attDueBPS)
ticker = slots.NewSlotTickerWithOffset(s.genesisTime, attThreshold, cfg.SecondsPerSlot)
}
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
@@ -664,7 +775,18 @@ func (s *Service) isDataAvailable(
root := roBlock.Root()
blockVersion := block.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}
if len(kzgCommitments) == 0 {
return nil
}
return s.areDataColumnsAvailable(ctx, root, block.Slot())
}
if blockVersion >= version.Deneb {
@@ -679,30 +801,15 @@ func (s *Service) isDataAvailable(
func (s *Service) areDataColumnsAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
slot primitives.Slot,
) error {
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
currentSlot := s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(slot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}
// If the block has no commitments there is nothing to wait for.
if len(kzgCommitments) == 0 {
return nil
}
// All columns to sample need to be available for the block to be considered available.
nodeID := s.cfg.P2P.NodeID()
@@ -756,7 +863,7 @@ func (s *Service) areDataColumnsAvailable(
}
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
nextSlot, err := slots.StartTime(s.genesisTime, slot+1)
if err != nil {
return fmt.Errorf("unable to determine slot start time: %w", err)
}
@@ -771,7 +878,7 @@ func (s *Service) areDataColumnsAvailable(
}
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
"columnsWaiting": helpers.SortedPrettySliceFromMap(missing),
@@ -817,7 +924,7 @@ func (s *Service) areDataColumnsAvailable(
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", slot, root, missingIndices)
}
}
}
@@ -915,37 +1022,18 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
var accessRoot [32]byte
isFull, err := headState.IsParentBlockFull()
if err != nil || !isFull {
accessRoot = headRoot
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
accessRoot, err = headState.LatestBlockHash()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash, using head root as access root")
accessRoot = headRoot
}
}
s.refreshCaches(ctx, currentSlot, headRoot, headState, accessRoot)
// return early if we already started building a block for the current
// head root
_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
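The post-Fulu branch above defers the expensive cache maintenance into a goroutine so the forkchoice update reaches the engine first. A stripped-down sketch of that shape, with illustrative names and an assumed 4-second deadline (not Prysm's actual slotDeadline):

package main

import (
	"context"
	"fmt"
	"time"
)

// lateTasks defers the cache refresh into a goroutine launched only after the
// surrounding function has issued its FCU call, bounded by a deadline derived
// from the parent context.
func lateTasks(parent context.Context, refresh func(context.Context) error) {
	defer func() {
		go func() {
			ctx, cancel := context.WithTimeout(parent, 4*time.Second)
			defer cancel()
			if err := refresh(ctx); err != nil {
				fmt.Println("cache refresh failed:", err)
			}
		}()
	}()
	// ... issue the FCU call to the engine here ...
}

func main() {
	lateTasks(context.Background(), func(context.Context) error { return nil })
	time.Sleep(50 * time.Millisecond) // give the async refresh time to run in this demo
}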
@@ -959,26 +1047,38 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
if headState.Version() >= version.Gloas {
bh, err := headState.LatestBlockHash()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash")
return
}
_, err = s.notifyForkchoiceUpdateGloas(ctx, bh, attribute)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
} else {
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}
}
@@ -992,9 +1092,10 @@ func (s *Service) waitForSync() error {
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
// The caller of this function must hold a write lock on the forkchoice store.
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
}
return err
}

View File

@@ -38,57 +38,67 @@ func (s *Service) CurrentSlot() primitives.Slot {
}
// getFCUArgs returns the arguments to call forkchoice update
func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
func (s *Service) getFCUArgs(cfg *postBlockProcessConfig) (*fcuConfig, error) {
fcuArgs, err := s.getFCUArgsEarlyBlock(cfg)
if err != nil {
return nil, err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
return fcuArgs, nil
}
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig) (*fcuConfig, error) {
if cfg.roblock.Root() == cfg.headRoot {
fcuArgs.headState = cfg.postState
fcuArgs.headBlock = cfg.roblock
fcuArgs.headRoot = cfg.headRoot
fcuArgs.proposingSlot = s.CurrentSlot() + 1
return nil
return &fcuConfig{
headState: cfg.postState,
headBlock: cfg.roblock,
headRoot: cfg.headRoot,
proposingSlot: s.CurrentSlot() + 1,
}, nil
}
return s.fcuArgsNonCanonicalBlock(cfg, fcuArgs)
return s.fcuArgsNonCanonicalBlock(cfg)
}
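The signature change above, from mutating a caller-supplied *fcuConfig to constructing and returning one, means a nil error can never be paired with a partially filled struct. A toy illustration of the two shapes, with stand-in types:

package main

import "fmt"

type fcuArgs struct{ headRoot string }

// Before: the callee mutated a caller-allocated struct, so an error path
// could still leave partial writes behind.
func fillOld(a *fcuArgs) error { a.headRoot = "0xabc"; return nil }

// After: the callee constructs and returns the value, so a nil error always
// pairs with a fully initialized struct.
func buildNew() (*fcuArgs, error) { return &fcuArgs{headRoot: "0xabc"}, nil }

func main() {
	a := &fcuArgs{}
	_ = fillOld(a)
	b, _ := buildNew()
	fmt.Println(a.headRoot, b.headRoot)
}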
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires that the caller hold a lock
// on Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
receivedWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
headWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
}
log.WithFields(logrus.Fields{
fields := logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
"receivedWeight": receivedWeight,
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
}
headEmpty, headFull, err := s.cfg.ForkChoiceStore.PayloadWeights(headRoot)
if err == nil {
fields["headEmptyWeight"] = headEmpty
fields["headFullWeight"] = headFull
}
log.WithFields(fields).Debug("Head block is not the received block")
}
// fcuArgsNonCanonicalBlock returns the arguments to the FCU call when the
// incoming block is non-canonical; the arguments are based on the head root instead.
func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig) (*fcuConfig, error) {
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot)
if err != nil {
return err
return nil, err
}
fcuArgs.headState = headState
fcuArgs.headBlock = headBlock
fcuArgs.headRoot = cfg.headRoot
fcuArgs.proposingSlot = s.CurrentSlot() + 1
return nil
return &fcuConfig{
headState: headState,
headBlock: headBlock,
headRoot: cfg.headRoot,
proposingSlot: s.CurrentSlot() + 1,
}, nil
}
// sendStateFeedOnBlock sends an event that a new block has been synced
@@ -189,36 +199,62 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// GetPrestateToPropose returns the pre-state for a proposer to base its block on.
// It is similar to GetBlockPreState but omits verifications that are unnecessary when proposing.
func (s *Service) GetPrestateToPropose(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.GetPreStateToPropose")
defer span.End()
accessRoot, err := s.getLookupParentRoot(b)
if err != nil {
return nil, errors.Wrap(err, "could not get lookup parent root")
}
bl := b.Block()
preState, err := s.cfg.StateGen.StateByRoot(ctx, accessRoot)
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", bl.Slot())
}
if preState == nil || preState.IsNil() {
return nil, errors.Wrapf(err, "nil pre state for slot %d", bl.Slot())
}
return preState, nil
}
// GetBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func (s *Service) GetBlockPreState(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
defer span.End()
accessRoot, err := s.getLookupParentRoot(b)
if err != nil {
return nil, errors.Wrap(err, "could not get lookup parent root")
}
// Verify incoming block has a valid pre state.
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
if err := s.verifyBlkPreState(ctx, accessRoot); err != nil {
return nil, err
}
preState, err := s.cfg.StateGen.StateByRoot(ctx, b.ParentRoot())
bl := b.Block()
preState, err := s.cfg.StateGen.StateByRoot(ctx, accessRoot)
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot())
return nil, errors.Wrapf(err, "could not get pre state for slot %d", bl.Slot())
}
if preState == nil || preState.IsNil() {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot())
return nil, errors.Wrapf(err, "nil pre state for slot %d", bl.Slot())
}
// Verify block slot time is not from the future.
if err := slots.VerifyTime(s.genesisTime, b.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
if err := slots.VerifyTime(s.genesisTime, bl.Slot(), params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
return nil, err
}
// Verify block is later than the finalized epoch slot.
if err := s.verifyBlkFinalizedSlot(b); err != nil {
if err := s.verifyBlkFinalizedSlot(bl); err != nil {
return nil, err
}
return preState, nil
}
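With getBlockPreState exported and retyped to take an ROBlock, callers wrap the signed block and its root once and pass that through, as the test updates below show. A hedged sketch of the calling convention — the anonymous interface stands in for the chain Service and mirrors the method added to BlockReceiver in this change:

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
)

// preStateFor shows the new convention: construct the ROBlock first, then
// hand it to GetBlockPreState.
func preStateFor(
	ctx context.Context,
	svc interface {
		GetBlockPreState(context.Context, consensusblocks.ROBlock) (state.BeaconState, error)
	},
	wsb interfaces.ReadOnlySignedBeaconBlock,
	root [32]byte,
) (state.BeaconState, error) {
	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
	if err != nil {
		return nil, err
	}
	return svc.GetBlockPreState(ctx, roblock)
}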

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
@@ -731,13 +732,13 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
@@ -783,13 +784,13 @@ func TestOnBlock_CanFinalize(t *testing.T) {
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
@@ -847,13 +848,13 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1322,13 +1323,13 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
wg.Add(4)
var lock sync.Mutex
go func() {
preState, err := service.getBlockPreState(ctx, wsb1.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1336,13 +1337,13 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb2.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1350,13 +1351,13 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb3.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1364,13 +1365,13 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
wg.Done()
}()
go func() {
preState, err := service.getBlockPreState(ctx, wsb4.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1442,13 +1443,13 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1464,13 +1465,13 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1487,13 +1488,13 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1512,13 +1513,13 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
@@ -1549,12 +1550,12 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.GetBlockPreState(ctx, rowsb)
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1578,13 +1579,13 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
@@ -1647,13 +1648,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1670,13 +1671,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1692,13 +1693,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
@@ -1723,13 +1724,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
invalidRoots[i-13], err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1753,12 +1754,12 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.GetBlockPreState(ctx, rowsb)
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
@@ -1793,13 +1794,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1820,13 +1821,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
@@ -1850,13 +1851,13 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
@@ -1912,13 +1913,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1934,13 +1935,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -1956,13 +1957,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
lastValidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
@@ -1989,13 +1990,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := service.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -2006,6 +2007,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import block 19 to find out that the whole chain 13--18 was in fact
// invalid
@@ -2020,12 +2022,12 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.GetBlockPreState(ctx, rowsb)
require.NoError(t, err)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
require.NoError(t, err)
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
@@ -2112,13 +2114,13 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -2180,13 +2182,13 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -2416,14 +2418,12 @@ func Test_getFCUArgs(t *testing.T) {
isValidPayload: true,
}
// error branch
fcuArgs := &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
_, err = s.getFCUArgs(cfg)
require.ErrorContains(t, "block does not exist", err)
// canonical branch
cfg.headRoot = cfg.roblock.Root()
fcuArgs = &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
fcuArgs, err := s.getFCUArgs(cfg)
require.NoError(t, err)
require.Equal(t, cfg.roblock.Root(), fcuArgs.headRoot)
}
@@ -2455,7 +2455,9 @@ func TestRollbackBlock(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2468,7 +2470,7 @@ func TestRollbackBlock(t *testing.T) {
// Set invalid parent root to trigger forkchoice error.
wsb.SetParentRoot([]byte("bad"))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
// Rollback block insertion into db and caches.
@@ -2513,7 +2515,9 @@ func TestRollbackBlock_SavePostStateInfo_ContextDeadline(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2569,13 +2573,13 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -2586,7 +2590,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err = service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
@@ -2600,8 +2606,6 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
// Set deadlined context when processing the block
cancCtx, canc := context.WithCancel(t.Context())
canc()
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
parentRoot = roblock.Block().ParentRoot()
@@ -3477,3 +3481,219 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
}
}
}
func TestHandleBlockPayloadAttestations(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)
t.Run("pre-Gloas block is no-op", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
blk := util.NewBeaconBlockElectra()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
st, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(t.Context(), wsb.Block(), st))
})
t.Run("empty payload attestations", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
blk := util.NewBeaconBlockGloas()
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
st, err := util.NewBeaconStateGloas()
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(t.Context(), wsb.Block(), st))
})
t.Run("unknown root is skipped", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
numVals := 2048
headState := gloasStateWithValidators(t, 2, numVals)
unknownRoot := bytesutil.ToBytes32([]byte("unknown"))
bits := bitfield.NewBitvector512()
bits.SetBitAt(0, true)
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
Body: &ethpb.BeaconBlockBodyGloas{
PayloadAttestations: []*ethpb.PayloadAttestation{
{
AggregationBits: bits,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: unknownRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: true,
},
Signature: make([]byte, 96),
},
},
},
},
})
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
})
t.Run("known root sets PTC votes", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
numVals := 2048
headState := gloasStateWithValidators(t, 2, numVals)
base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, insertBlk, blockRoot)
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))
bits := bitfield.NewBitvector512()
bits.SetBitAt(0, true)
bits.SetBitAt(2, true)
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
Body: &ethpb.BeaconBlockBodyGloas{
PayloadAttestations: []*ethpb.PayloadAttestation{
{
AggregationBits: bits,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: true,
},
Signature: make([]byte, 96),
},
},
},
},
})
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
})
t.Run("multiple attestations", func(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
numVals := 2048
headState := gloasStateWithValidators(t, 2, numVals)
base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, insertBlk, blockRoot)
bits1 := bitfield.NewBitvector512()
bits1.SetBitAt(0, true)
bits2 := bitfield.NewBitvector512()
bits2.SetBitAt(1, true)
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: 2,
Body: &ethpb.BeaconBlockBodyGloas{
PayloadAttestations: []*ethpb.PayloadAttestation{
{
AggregationBits: bits1,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: make([]byte, 96),
},
{
AggregationBits: bits2,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: false,
BlobDataAvailable: true,
},
Signature: make([]byte, 96),
},
},
},
},
})
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
})
}
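The PTC votes in these cases are encoded as a 512-bit aggregation bitvector, one bit per payload-timeliness-committee member. A standalone demonstration of the go-bitfield calls the tests rely on:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// SetBitAt marks a committee member as having attested.
	bits := bitfield.NewBitvector512()
	bits.SetBitAt(0, true)
	bits.SetBitAt(2, true)
	fmt.Println(bits.BitAt(0), bits.BitAt(1), bits.BitAt(2)) // true false true
}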
func TestUpdateCachesAndEpochBoundary_MatchingRoots(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
service.updateCachesAndEpochBoundary(t.Context(), 1, st, accessRoot, accessRoot[:], st)
cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}
func TestUpdateCachesAndEpochBoundary_DifferentRoots(t *testing.T) {
service := testServiceNoDB(t)
headState, _ := util.DeterministicGenesisState(t, 1)
lastState, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
lastRoot := [32]byte{'b'}
service.updateCachesAndEpochBoundary(t.Context(), 1, headState, accessRoot, lastRoot[:], lastState)
// Cache should be keyed by accessRoot, not lastRoot.
cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
cached = transition.NextSlotState(lastRoot[:], 1)
require.Equal(t, true, cached == nil)
}
func TestRefreshCaches_NoCachedState(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
headRoot := [32]byte{'h'}
service.refreshCaches(t.Context(), 1, headRoot, st, headRoot)
cached := transition.NextSlotState(headRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}
func TestRefreshCaches_CachedStateMatchesAccessRoot(t *testing.T) {
service := testServiceNoDB(t)
st, _ := util.DeterministicGenesisState(t, 1)
accessRoot := [32]byte{'a'}
headRoot := [32]byte{'h'}
// Pre-populate the cache with accessRoot.
require.NoError(t, transition.UpdateNextSlotCache(t.Context(), accessRoot[:], st))
service.refreshCaches(t.Context(), 1, headRoot, st, accessRoot)
cached := transition.NextSlotState(accessRoot[:], 1)
require.NotNil(t, cached)
require.Equal(t, primitives.Slot(1), cached.Slot())
}

View File

@@ -110,13 +110,13 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
@@ -172,13 +172,13 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()

View File

@@ -45,6 +45,8 @@ type BlockReceiver interface {
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
GetBlockPreState(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
GetPrestateToPropose(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
}
// BlobReceiver interface defines the methods of chain service for receiving new
@@ -95,18 +97,17 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err != nil {
return errors.Wrap(err, "block copy")
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
currentCheckpoints := s.saveCurrentCheckpoints(preState)
roblock, err := blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "new ro block with root")
}
preState, err := s.GetBlockPreState(ctx, roblock)
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
currentCheckpoints := s.saveCurrentCheckpoints(preState)
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
if err != nil {
return errors.Wrap(err, "validator execution and consensus")
@@ -152,7 +153,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Have we been finalizing? Should we start saving hot states to db?
if err := s.checkSaveHotStateDB(ctx); err != nil {
return errors.Wrap(err, "check save hot state db")
log.WithError(err).Error("Could not check save hot state DB")
}
// We apply the same heuristic to some of our more important caches.
@@ -211,6 +212,16 @@ func (s *Service) validateExecutionAndConsensus(
preState state.BeaconState,
block blocks.ROBlock,
) (state.BeaconState, bool, error) {
if block.Version() >= version.Gloas {
postState, err := s.validateStateTransition(ctx, preState, block)
if errors.Is(err, ErrNotDescendantOfFinalized) {
return nil, false, invalidBlock{error: err, root: block.Root()}
}
if err != nil {
return nil, false, errors.Wrap(err, "failed to validate consensus state transition function")
}
return postState, false, nil
}
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, false, err
@@ -244,6 +255,10 @@ func (s *Service) validateExecutionAndConsensus(
}
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, block blocks.ROBlock) (time.Duration, error) {
// Gloas DA is handled on the payload envelope.
if block.Version() >= version.Gloas {
return 0, nil
}
var err error
start := time.Now()
if avs != nil {
@@ -351,6 +366,8 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
}
}()
}
go s.checkpointStateCache.EvictUpTo(finalized.Epoch)
}
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
@@ -633,7 +650,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.BlockHash()))
s.cfg.ForkChoiceStore.Unlock()
return false, err
}

View File

@@ -0,0 +1,319 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// ExecutionPayloadEnvelopeReceiver defines the methods for receiving execution payload envelopes.
type ExecutionPayloadEnvelopeReceiver interface {
ReceiveExecutionPayloadEnvelope(context.Context, interfaces.ROSignedExecutionPayloadEnvelope) error
}
// ReceiveExecutionPayloadEnvelope processes a signed execution payload envelope for the Gloas fork.
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveExecutionPayloadEnvelope")
defer span.End()
envelope, err := signed.Envelope()
if err != nil {
return errors.Wrap(err, "could not get envelope")
}
root := envelope.BeaconBlockRoot()
err = s.payloadBeingSynced.set(root)
if errors.Is(err, errBlockBeingSynced) {
log.WithField("blockRoot", fmt.Sprintf("%#x", root)).Debug("Ignoring payload envelope currently being synced")
return nil
}
defer s.payloadBeingSynced.unset(root)
preState, err := s.getPayloadEnvelopePrestate(ctx, envelope)
if err != nil {
return err
}
var isValidPayload bool
g, gCtx := errgroup.WithContext(ctx)
g.Go(func() error {
return gloas.ProcessExecutionPayload(gCtx, preState, signed)
})
g.Go(func() error {
var elErr error
isValidPayload, elErr = s.validateExecutionOnEnvelope(gCtx, preState, envelope)
return elErr
})
if err := g.Wait(); err != nil {
return err
}
// DA check: verify data columns are available before inserting payload.
bid, err := preState.LatestExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get latest execution payload bid")
}
if len(bid.BlobKzgCommitments()) > 0 {
if err := s.areDataColumnsAvailable(ctx, root, envelope.Slot()); err != nil {
return errors.Wrap(err, "data availability check failed for payload envelope")
}
}
if err := s.savePostPayload(ctx, signed, preState); err != nil {
return err
}
if err := s.InsertPayload(envelope); err != nil {
return errors.Wrap(err, "could not insert payload into forkchoice")
}
if isValidPayload {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root); err != nil {
log.WithError(err).Error("Could not set optimistic to valid")
}
s.cfg.ForkChoiceStore.Unlock()
}
headRoot, err := s.HeadRoot(ctx)
if err != nil {
log.WithError(err).Error("Could not get head root")
return nil
}
if err := s.postPayloadHeadUpdate(ctx, envelope, preState, root, headRoot); err != nil {
return err
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.PayloadProcessed,
Data: &statefeed.PayloadProcessedData{
Slot: envelope.Slot(),
BlockRoot: root,
},
})
execution, err := envelope.Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload from envelope for logging")
return nil
}
log.WithFields(logrus.Fields{
"slot": envelope.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(execution.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(execution.ParentHash())),
}).Info("Processed execution payload envelope")
return nil
}
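ReceiveExecutionPayloadEnvelope runs the consensus-side state transition and the engine-side payload validation concurrently, with the first error cancelling the other through the group context. A minimal sketch of that concurrency shape — the two closures are stand-ins for gloas.ProcessExecutionPayload and validateExecutionOnEnvelope:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func validateInParallel(ctx context.Context) (bool, error) {
	var isValidPayload bool
	g, gCtx := errgroup.WithContext(ctx)
	g.Go(func() error {
		// Consensus-side state transition (stand-in). Honor cancellation
		// triggered by a failure in the other goroutine.
		select {
		case <-gCtx.Done():
			return gCtx.Err()
		default:
			return nil
		}
	})
	g.Go(func() error {
		// Execution-layer validation (stand-in). Writing the flag here and
		// reading it after g.Wait() is race-free: Wait establishes the
		// happens-before edge.
		isValidPayload = true
		return nil
	})
	if err := g.Wait(); err != nil {
		return false, err
	}
	return isValidPayload, nil
}

func main() {
	ok, err := validateInParallel(context.Background())
	fmt.Println(ok, err)
}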
func (s *Service) postPayloadHeadUpdate(ctx context.Context, envelope interfaces.ROExecutionPayloadEnvelope, st state.BeaconState, root [32]byte, headRoot []byte) error {
if !bytes.Equal(headRoot, root[:]) {
return nil
}
payload, err := envelope.Execution()
if err != nil {
return errors.Wrap(err, "could not get execution payload from envelope")
}
blockHash := bytesutil.ToBytes32(payload.BlockHash())
s.headLock.Lock()
s.head.state = st
s.headLock.Unlock()
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
log.WithError(err).Error("Could not update next slot cache")
}
if err := s.handleEpochBoundary(ctx, envelope.Slot(), st, blockHash[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}()
attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot)
if s.inRegularSync() {
go func() {
pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, blockHash, attr)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return
}
if attr != nil && !attr.IsEmpty() && pid != nil {
var pId [8]byte
copy(pId[:], pid[:])
s.cfg.PayloadIDCache.Set(envelope.Slot()+1, root, pId)
}
}()
}
return nil
}
func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, envelope interfaces.ROExecutionPayloadEnvelope) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getPayloadEnvelopePrestate")
defer span.End()
root := envelope.BeaconBlockRoot()
if !s.InForkchoice(root) {
return nil, fmt.Errorf("beacon block root %#x not found in forkchoice", root)
}
if err := s.verifyBlkPreState(ctx, root); err != nil {
return nil, errors.Wrap(err, "could not verify pre-state")
}
preState, err := s.cfg.StateGen.StateByRoot(ctx, root)
if err != nil {
return nil, errors.Wrap(err, "could not get pre-state by root")
}
if preState == nil || preState.IsNil() {
return nil, fmt.Errorf("nil pre-state for beacon block root %#x", root)
}
return preState, nil
}
// The returned boolean indicates whether the payload was fully valid; false with a nil error means the engine accepted it as syncing (optimistic).
func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelope")
defer span.End()
payload, err := envelope.Execution()
if err != nil {
return false, errors.Wrap(err, "could not get execution payload from envelope")
}
latestBid, err := st.LatestExecutionPayloadBid()
if err != nil {
return false, errors.Wrap(err, "could not get latest execution payload bid")
}
commitments := latestBid.BlobKzgCommitments()
versionedHashes := make([]common.Hash, len(commitments))
for i, c := range commitments {
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
}
parentRoot := common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot))
requests := envelope.ExecutionRequests()
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests)
if err == nil {
return true, nil
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
log.WithFields(logrus.Fields{
"slot": envelope.Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic envelope")
return false, nil
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
return false, invalidBlock{error: ErrInvalidPayload}
}
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
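ConvertKzgCommitmentToVersionedHash is assumed here to follow the EIP-4844 construction: sha256 of the 48-byte commitment with the first byte overwritten by the 0x01 version prefix. A standalone sketch of that assumed rule:

package main

import (
	"crypto/sha256"
	"fmt"
)

const blobCommitmentVersionKZG = 0x01

// versionedHash derives an EIP-4844 versioned hash from a KZG commitment.
func versionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = blobCommitmentVersionKZG
	return h
}

func main() {
	c := make([]byte, 48) // KZG commitments are 48-byte compressed G1 points
	fmt.Printf("%#x\n", versionedHash(c))
}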
func (s *Service) validateExecutionOnEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
isValid, err := s.notifyNewEnvelope(ctx, st, envelope)
if err == nil {
return isValid, nil
}
blockRoot := envelope.BeaconBlockRoot()
parentRoot := bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot)
payload, payloadErr := envelope.Execution()
if payloadErr != nil {
return false, errors.Wrap(payloadErr, "could not get execution payload from envelope")
}
parentHash := bytesutil.ToBytes32(payload.ParentHash())
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return false, s.handleInvalidExecutionError(ctx, err, blockRoot, parentRoot, parentHash)
}
func (s *Service) savePostPayload(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostPayload")
defer span.End()
protoEnv, ok := signed.Proto().(*ethpb.SignedExecutionPayloadEnvelope)
if !ok {
return errors.New("could not type assert signed envelope to proto")
}
if err := s.cfg.BeaconDB.SaveExecutionPayloadEnvelope(ctx, protoEnv); err != nil {
return errors.Wrap(err, "could not save execution payload envelope")
}
envelope, err := signed.Envelope()
if err != nil {
return errors.Wrap(err, "could not get envelope")
}
payload, err := envelope.Execution()
if err != nil {
return errors.Wrap(err, "could not get execution payload from envelope")
}
return s.cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(payload.BlockHash()), st)
}
// notifyForkchoiceUpdateGloas takes the block hash directly because Gloas
// blocks don't carry an execution payload in the body.
func (s *Service) notifyForkchoiceUpdateGloas(ctx context.Context, blockHash [32]byte, attributes payloadattribute.Attributer) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdateGloas")
defer span.End()
s.cfg.ForkChoiceStore.RLock()
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
s.cfg.ForkChoiceStore.RUnlock()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: blockHash[:],
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
if attributes == nil {
attributes = payloadattribute.EmptyWithVersion(version.Gloas)
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attributes)
if err == nil {
return payloadID, nil
}
switch {
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
log.WithFields(logrus.Fields{
"headBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(blockHash[:])),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called forkchoice updated with optimistic block (Gloas)")
return payloadID, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
return nil, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: bytesutil.ToBytes32(lastValidHash),
}
default:
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
return nil, nil
}
}

View File

@@ -0,0 +1,39 @@
package blockchain
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/pkg/errors"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// PayloadAttestationReceiver defines the chain service method for receiving
// validated payload attestation messages.
type PayloadAttestationReceiver interface {
ReceivePayloadAttestationMessage(context.Context, *ethpb.PayloadAttestationMessage) error
}
// ReceivePayloadAttestationMessage accepts a payload attestation message and updates the
// forkchoice PTC vote bitvectors for the referenced beacon block.
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error {
if a == nil || a.Data == nil {
return errors.New("nil payload attestation message")
}
root := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return err
}
idx, err := gloas.PayloadCommitteeIndex(ctx, st, a.Data.Slot, a.ValidatorIndex)
if err != nil {
return err
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.cfg.ForkChoiceStore.SetPTCVote(root, idx, a.Data.PayloadPresent, a.Data.BlobDataAvailable)
return nil
}
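For context, here is a minimal sketch of how a gossip handler might combine this receiver with the single-slot seen-cache introduced later in this change. The handler name and wiring are hypothetical, not part of this diff; only the PayloadAttestationCache and PayloadAttestationReceiver APIs shown elsewhere in this change are assumed.

package sync

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// handlePayloadAttestation (hypothetical) drops duplicate votes using the
// single-slot seen-cache and forwards fresh ones to the chain service.
func handlePayloadAttestation(
	ctx context.Context,
	seen *cache.PayloadAttestationCache,
	recv blockchain.PayloadAttestationReceiver,
	msg *ethpb.PayloadAttestationMessage,
) error {
	if msg == nil || msg.Data == nil {
		return nil // the receiver also rejects nil messages
	}
	// One vote per validator per slot: skip anything already seen.
	if seen.Seen(msg.Data.Slot, msg.ValidatorIndex) {
		return nil
	}
	if err := recv.ReceivePayloadAttestationMessage(ctx, msg); err != nil {
		return err
	}
	return seen.Add(msg.Data.Slot, msg.ValidatorIndex)
}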

View File

@@ -0,0 +1,143 @@
package blockchain
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestReceivePayloadAttestationMessage_NilMessage(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
err := s.ReceivePayloadAttestationMessage(t.Context(), nil)
require.ErrorContains(t, "nil payload attestation message", err)
}
func TestReceivePayloadAttestationMessage_NilData(t *testing.T) {
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
msg := &ethpb.PayloadAttestationMessage{}
err := s.ReceivePayloadAttestationMessage(t.Context(), msg)
require.ErrorContains(t, "nil payload attestation message", err)
}
func TestReceivePayloadAttestationMessage_ValidatorNotInPTC(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
numVals := 2048
headState := gloasStateWithValidators(t, 1, numVals)
base, blk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, blk, blockRoot)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
// Pick a validator index not in the PTC.
inPTC := make(map[primitives.ValidatorIndex]bool)
for _, idx := range ptc {
inPTC[idx] = true
}
var notInPTC primitives.ValidatorIndex
for i := primitives.ValidatorIndex(0); int(i) < numVals; i++ {
if !inPTC[i] {
notInPTC = i
break
}
}
msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: notInPTC,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
},
}
err = s.ReceivePayloadAttestationMessage(ctx, msg)
require.ErrorContains(t, "validator not in PTC", err)
}
func TestReceivePayloadAttestationMessage_OK(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.GloasForkEpoch = 0
params.OverrideBeaconConfig(cfg)
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
ctx := t.Context()
blockRoot := bytesutil.ToBytes32([]byte("root1"))
parentRoot := params.BeaconConfig().ZeroHash
blockHash := bytesutil.ToBytes32([]byte("hash1"))
headState := gloasStateWithValidators(t, 1, 2048)
base, blk := testGloasState(t, 1, parentRoot, blockHash)
insertGloasBlock(t, s, base, blk, blockRoot)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
ptc, err := gloas.PayloadCommittee(ctx, headState, 1)
require.NoError(t, err)
require.NotEqual(t, 0, len(ptc))
msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: ptc[0],
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: blockRoot[:],
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: true,
},
}
require.NoError(t, s.ReceivePayloadAttestationMessage(ctx, msg))
}
// gloasStateWithValidators returns a Gloas beacon state with active validators
// for PTC committee computation.
func gloasStateWithValidators(t *testing.T, slot primitives.Slot, numVals int) state.BeaconState {
t.Helper()
validators := make([]*ethpb.Validator, numVals)
balances := make([]uint64, numVals)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
balances[i] = params.BeaconConfig().MaxEffectiveBalanceElectra
}
st, err := util.NewBeaconStateGloas(func(s *ethpb.BeaconStateGloas) error {
s.Slot = slot
s.Validators = validators
s.Balances = balances
return nil
})
require.NoError(t, err)
return st
}

View File

@@ -62,6 +62,7 @@ type Service struct {
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
payloadBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
slasherEnabled bool
@@ -186,6 +187,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
payloadBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
syncCommitteeHeadState: cache.NewSyncCommitteeHeadState(),
}
for _, opt := range opts {

View File

@@ -166,11 +166,11 @@ func (s *Service) setupForkchoiceCheckpoints() error {
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
log.WithError(err).Error("Could not update forkchoice's justified checkpoint, trying to update finalized checkpoint anyway")
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: fRoot}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
log.WithError(err).Error("Could not update forkchoice's finalized checkpoint")
}
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
return nil

View File

@@ -104,7 +104,9 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
preState, err := service.GetBlockPreState(ctx, roblock)
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)

View File

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -77,6 +77,10 @@ type ChainService struct {
DataColumns []blocks.VerifiedRODataColumn
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
MockCanonicalRoots map[primitives.Slot][32]byte
MockCanonicalFull map[primitives.Slot]bool
ParentPayloadReadyVal *bool
ForkchoiceRoots map[[32]byte]bool
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -334,6 +338,16 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
return nil
}
// GetBlockPreState mocks the same method in the chain service.
func (s *ChainService) GetBlockPreState(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
return s.State, nil
}
// GetPrestateToPropose mocks the same method in the chain service.
func (s *ChainService) GetPrestateToPropose(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
return s.State.Copy(), nil
}
// HeadSlot mocks HeadSlot method in chain service.
func (s *ChainService) HeadSlot() primitives.Slot {
if s.MockHeadSlot != nil {
@@ -569,7 +583,10 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
}
// InForkchoice mocks the same method in the chain service
func (s *ChainService) InForkchoice(_ [32]byte) bool {
func (s *ChainService) InForkchoice(root [32]byte) bool {
if s.ForkchoiceRoots != nil {
return s.ForkchoiceRoots[root]
}
return !s.NotFinalized
}
@@ -630,7 +647,7 @@ func prepareForkchoiceState(
}
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
st, err := state_native.InitializeFromProtoUnsafeBellatrix(base)
if err != nil {
return nil, blocks.ROBlock{}, err
}
@@ -689,7 +706,42 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HighestReceivedBlockSlot()
}
return 0
if s.Slot != nil {
return *s.Slot
}
return s.BlockSlot
}
// HighestReceivedBlockRoot mocks the same method in the chain service
func (s *ChainService) HighestReceivedBlockRoot() [32]byte {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HighestReceivedBlockRoot()
}
if s.Slot != nil && s.MockCanonicalRoots != nil {
if root, ok := s.MockCanonicalRoots[*s.Slot]; ok {
return root
}
}
if len(s.Root) == 32 {
return bytesutil.ToBytes32(s.Root)
}
return [32]byte{}
}
// HasFullNode mocks the same method in the chain service
func (s *ChainService) HasFullNode(root [32]byte) bool {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HasFullNode(root)
}
if s.Slot != nil && s.MockCanonicalRoots != nil && s.MockCanonicalFull != nil {
if r, ok := s.MockCanonicalRoots[*s.Slot]; ok && r == root {
return s.MockCanonicalFull[*s.Slot]
}
}
if s.ForkchoiceRoots != nil {
return s.ForkchoiceRoots[root]
}
return false
}
// InsertNode mocks the same method in the chain service
@@ -700,6 +752,14 @@ func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, blo
return nil
}
// InsertPayload mocks the same method in the chain service
func (s *ChainService) InsertPayload(pe interfaces.ROExecutionPayloadEnvelope) error {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.InsertPayload(pe)
}
return nil
}
// ForkChoiceDump mocks the same method in the chain service
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
if s.ForkChoiceStore != nil {
@@ -757,6 +817,24 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
return nil
}
// ReceivePayloadAttestationMessage implements the same method in the chain service.
func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *ethpb.PayloadAttestationMessage) error {
return nil
}
// ReceiveExecutionPayloadEnvelope implements the same method in the chain service.
func (c *ChainService) ReceiveExecutionPayloadEnvelope(_ context.Context, _ interfaces.ROSignedExecutionPayloadEnvelope) error {
return nil
}
// ParentPayloadReady mocks the same method in the chain service.
func (s *ChainService) ParentPayloadReady(_ interfaces.ReadOnlyBeaconBlock) bool {
if s.ParentPayloadReadyVal != nil {
return *s.ParentPayloadReadyVal
}
return true
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
@@ -767,6 +845,17 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
return c.TargetRoot, nil
}
func (c *ChainService) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
var root [32]byte
if c.MockCanonicalRoots != nil {
root = c.MockCanonicalRoots[slot]
}
if c.MockCanonicalFull != nil {
return root, c.MockCanonicalFull[slot]
}
return root, false
}
// MockSyncChecker is a mock implementation of blockchain.Checker.
// We can't add a compile-time assertion that it satisfies the interface because that would create a circular dependency.
type MockSyncChecker struct {

View File

@@ -17,6 +17,7 @@ go_library(
"error.go",
"interfaces.go",
"log.go",
"payload_attestation.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
@@ -54,6 +55,7 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
@@ -76,6 +78,7 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"payload_attestation_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",

View File

@@ -9,10 +9,11 @@ import (
)
type AttestationConsensusData struct {
Slot primitives.Slot
HeadRoot []byte
Target forkchoicetypes.Checkpoint
Source forkchoicetypes.Checkpoint
Slot primitives.Slot
HeadRoot []byte
Target forkchoicetypes.Checkpoint
Source forkchoicetypes.Checkpoint
IsPayloadFull bool
}
// AttestationDataCache stores cached results of AttestationData requests.

View File

@@ -3,8 +3,10 @@ package cache
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -25,6 +27,14 @@ var (
Name: "check_point_state_cache_hit",
Help: "The number of check point state requests that are present in the cache.",
})
checkpointStateSize = promauto.NewGauge(prometheus.GaugeOpts{
Name: "check_point_state_cache_size",
Help: "The number of entries in the check point state cache.",
})
checkpointStateEvicted = promauto.NewCounter(prometheus.CounterOpts{
Name: "check_point_state_cache_evicted_total",
Help: "The number of entries evicted from the check point state cache.",
})
)
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
@@ -49,14 +59,14 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
item, exists := c.cache.Get(h)
if exists && item != nil {
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
return item.(state.BeaconState), nil
if !exists || item == nil {
checkpointStateMiss.Inc()
return nil, nil
}
checkpointStateMiss.Inc()
return nil, nil
checkpointStateHit.Inc()
// A copy is unnecessary here since the return value is only used to verify the attestation signature.
return item.(state.BeaconState), nil
}
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
@@ -66,6 +76,35 @@ func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.
if err != nil {
return err
}
c.cache.Add(h, s)
checkpointStateSize.Set(float64(c.cache.Len()))
return nil
}
// EvictUpTo removes all entries from the cache whose state epoch is at
// or before the given epoch. Returns the number of evicted entries.
func (c *CheckpointStateCache) EvictUpTo(epoch primitives.Epoch) int {
evicted := 0
for _, key := range c.cache.Keys() {
// Peek is used here to avoid updating the recency of the entry,
// as we are only checking for eviction.
v, ok := c.cache.Peek(key)
if !ok {
continue
}
st := v.(state.ReadOnlyBeaconState)
if slots.ToEpoch(st.Slot()) <= epoch {
c.cache.Remove(key)
evicted++
}
}
if evicted > 0 {
checkpointStateSize.Set(float64(c.cache.Len()))
checkpointStateEvicted.Add(float64(evicted))
}
return evicted
}
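A minimal usage sketch, assuming the caller prunes on finalization; the onFinalized hook below is illustrative and not part of this diff.

package blockchain

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/sirupsen/logrus"
)

// onFinalized (hypothetical) prunes checkpoint states once finalization
// advances: states at or before the finalized epoch can no longer be
// referenced by attestations the node still needs to verify, so their
// cache entries are dead weight.
func onFinalized(c *cache.CheckpointStateCache, finalized primitives.Epoch) {
	if n := c.EvictUpTo(finalized); n > 0 {
		logrus.WithField("evicted", n).Debug("Pruned finalized checkpoint states")
	}
}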

View File

@@ -72,3 +72,76 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
}
func TestCheckpointStateCache_EvictFinalized_FinalizedEntry(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp, st))
evicted := c.EvictUpTo(1)
assert.Equal(t, 1, evicted, "expected finalized entry to be evicted")
s, err := c.StateByCheckpoint(cp)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cache to be empty after eviction")
}
func TestCheckpointStateCache_EvictFinalized_NotFinalizedEntry(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp, st))
evicted := c.EvictUpTo(3)
assert.Equal(t, 0, evicted, "expected non-finalized entry NOT to be evicted")
s, err := c.StateByCheckpoint(cp)
require.NoError(t, err)
assert.NotNil(t, s, "expected entry to still be in cache")
}
func TestCheckpointStateCache_EvictFinalized_Mixed(t *testing.T) {
c := cache.NewCheckpointStateCache()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st1, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
require.NoError(t, err)
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
st2, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 64})
require.NoError(t, err)
cp5 := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'C'}, 32)}
st5, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
require.NoError(t, err)
require.NoError(t, c.AddCheckpointState(cp1, st1))
require.NoError(t, c.AddCheckpointState(cp2, st2))
require.NoError(t, c.AddCheckpointState(cp5, st5))
evicted := c.EvictUpTo(3)
assert.Equal(t, 2, evicted, "expected epochs 1 and 2 to be evicted")
s, err := c.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cp1 to be evicted")
s, err = c.StateByCheckpoint(cp2)
require.NoError(t, err)
assert.Equal(t, state.BeaconState(nil), s, "expected cp2 to be evicted")
s, err = c.StateByCheckpoint(cp5)
require.NoError(t, err)
assert.NotNil(t, s, "expected cp5 to still be in cache")
}
func TestCheckpointStateCache_EvictFinalized_EmptyCache(t *testing.T) {
c := cache.NewCheckpointStateCache()
evicted := c.EvictUpTo(0)
assert.Equal(t, 0, evicted, "expected no eviction from empty cache")
}

View File

@@ -0,0 +1,53 @@
package cache
import (
"sync"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// PayloadAttestationCache tracks seen payload attestation messages for a single slot.
type PayloadAttestationCache struct {
slot primitives.Slot
seen map[primitives.ValidatorIndex]struct{}
mu sync.RWMutex
}
// Seen returns true if a vote for the given slot has already been
// processed for this validator index.
func (p *PayloadAttestationCache) Seen(slot primitives.Slot, idx primitives.ValidatorIndex) bool {
p.mu.RLock()
defer p.mu.RUnlock()
if p.slot != slot {
return false
}
if p.seen == nil {
return false
}
_, ok := p.seen[idx]
return ok
}
// Add marks the given slot and validator index as seen.
// This function assumes that the message has already been validated.
func (p *PayloadAttestationCache) Add(slot primitives.Slot, idx primitives.ValidatorIndex) error {
p.mu.Lock()
defer p.mu.Unlock()
if p.slot != slot {
p.slot = slot
p.seen = make(map[primitives.ValidatorIndex]struct{})
}
if p.seen == nil {
p.seen = make(map[primitives.ValidatorIndex]struct{})
}
p.seen[idx] = struct{}{}
return nil
}
// Clear clears the internal cache.
func (p *PayloadAttestationCache) Clear() {
p.mu.Lock()
defer p.mu.Unlock()
p.slot = 0
p.seen = nil
}
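Note the deliberately minimal design: the cache tracks only one slot at a time, and Add for a different slot resets the seen set. That matches the one-vote-per-validator-per-slot semantics the Seen/Add pair enforces for gossip deduplication, and it keeps the memory footprint bounded without any pruning loop.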

View File

@@ -0,0 +1,48 @@
package cache_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/stretchr/testify/require"
)
func TestPayloadAttestationCache_SeenAndAdd(t *testing.T) {
var c cache.PayloadAttestationCache
slot1 := primitives.Slot(1)
slot2 := primitives.Slot(2)
idx1 := primitives.ValidatorIndex(3)
idx2 := primitives.ValidatorIndex(4)
require.False(t, c.Seen(slot1, idx1))
require.NoError(t, c.Add(slot1, idx1))
require.True(t, c.Seen(slot1, idx1))
require.False(t, c.Seen(slot1, idx2))
require.False(t, c.Seen(slot2, idx1))
require.NoError(t, c.Add(slot1, idx2))
require.True(t, c.Seen(slot1, idx1))
require.True(t, c.Seen(slot1, idx2))
require.NoError(t, c.Add(slot2, idx1))
require.True(t, c.Seen(slot2, idx1))
require.False(t, c.Seen(slot1, idx1))
require.False(t, c.Seen(slot1, idx2))
}
func TestPayloadAttestationCache_Clear(t *testing.T) {
var c cache.PayloadAttestationCache
slot := primitives.Slot(10)
idx := primitives.ValidatorIndex(42)
require.NoError(t, c.Add(slot, idx))
require.True(t, c.Seen(slot, idx))
c.Clear()
require.False(t, c.Seen(slot, idx))
require.NoError(t, c.Add(slot, idx))
require.True(t, c.Seen(slot, idx))
}

View File

@@ -20,6 +20,7 @@ go_library(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -75,7 +76,11 @@ func ProcessAttestationNoVerifySignature(
return nil, err
}
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance)
if err := beaconState.UpdatePendingPaymentWeight(att, indices, participatedFlags); err != nil {
return nil, errors.Wrap(err, "failed to update pending payment weight")
}
return SetParticipationAndRewardProposer(ctx, beaconState, att.GetData().Target.Epoch, indices, participatedFlags, totalBalance, att)
}
// SetParticipationAndRewardProposer retrieves and sets the epoch participation bits in state. Based on the epoch participation, it rewards
@@ -105,7 +110,9 @@ func SetParticipationAndRewardProposer(
beaconState state.BeaconState,
targetEpoch primitives.Epoch,
indices []uint64,
participatedFlags map[uint8]bool, totalBalance uint64) (state.BeaconState, error) {
participatedFlags map[uint8]bool,
totalBalance uint64,
att ethpb.Att) (state.BeaconState, error) {
var proposerRewardNumerator uint64
currentEpoch := time.CurrentEpoch(beaconState)
var stateErr error
@@ -299,6 +306,19 @@ func AttestationParticipationFlagIndices(beaconState state.ReadOnlyBeaconState,
participatedFlags[targetFlagIndex] = true
}
matchedSrcTgtHead := matchedHead && matchedSrcTgt
var beaconBlockRoot [32]byte
copy(beaconBlockRoot[:], data.BeaconBlockRoot)
matchingPayload, err := gloas.MatchingPayload(
beaconState,
beaconBlockRoot,
data.Slot,
uint64(data.CommitteeIndex),
)
if err != nil {
return nil, err
}
matchedSrcTgtHead = matchedSrcTgtHead && matchingPayload
if matchedSrcTgtHead && delay == cfg.MinAttestationInclusionDelay {
participatedFlags[headFlagIndex] = true
}

View File

@@ -1,7 +1,9 @@
package altair_test
import (
"bytes"
"fmt"
"reflect"
"testing"
"github.com/OffchainLabs/go-bitfield"
@@ -556,7 +558,7 @@ func TestSetParticipationAndRewardProposer(t *testing.T) {
b, err := helpers.TotalActiveBalance(beaconState)
require.NoError(t, err)
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b)
st, err := altair.SetParticipationAndRewardProposer(t.Context(), beaconState, test.epoch, test.indices, test.participatedFlags, b, &ethpb.Attestation{})
require.NoError(t, err)
i, err := helpers.BeaconProposerIndex(t.Context(), st)
@@ -775,11 +777,67 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
headFlagIndex: true,
},
},
{
name: "gloas same-slot committee index non-zero errors",
inputState: func() state.BeaconState {
stateSlot := primitives.Slot(5)
slot := primitives.Slot(3)
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
headRoot := bytes.Repeat([]byte{0xBB}, 32)
prevRoot := bytes.Repeat([]byte{0xCC}, 32)
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, prevRoot, 0, 0)
}(),
inputData: &ethpb.AttestationData{
Slot: 3,
CommitteeIndex: 1, // invalid for same-slot
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: bytes.Repeat([]byte{0xAA}, 32),
},
},
inputDelay: 1,
participationIndices: nil,
},
{
name: "gloas payload availability matches committee index",
inputState: func() state.BeaconState {
stateSlot := primitives.Slot(5)
slot := primitives.Slot(3)
targetRoot := bytes.Repeat([]byte{0xAA}, 32)
headRoot := bytes.Repeat([]byte{0xBB}, 32)
// Use the head root as the previous root so the same-slot check fails and payload availability is consulted instead.
return buildGloasStateForFlags(t, stateSlot, slot, targetRoot, headRoot, headRoot, 1, slot)
}(),
inputData: &ethpb.AttestationData{
Slot: 3,
CommitteeIndex: 1,
BeaconBlockRoot: bytes.Repeat([]byte{0xBB}, 32),
Source: &ethpb.Checkpoint{Root: bytes.Repeat([]byte{0xDD}, 32)},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: bytes.Repeat([]byte{0xAA}, 32),
},
},
inputDelay: 1,
participationIndices: map[uint8]bool{
sourceFlagIndex: true,
targetFlagIndex: true,
headFlagIndex: true,
},
},
}
for _, test := range tests {
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
if test.participationIndices == nil {
require.ErrorContains(t, "committee index", err)
continue
}
require.NoError(t, err)
require.DeepEqual(t, test.participationIndices, flagIndices)
if !reflect.DeepEqual(test.participationIndices, flagIndices) {
t.Fatalf("unexpected participation indices: got %v want %v", flagIndices, test.participationIndices)
}
}
}
@@ -858,3 +916,61 @@ func TestMatchingStatus(t *testing.T) {
require.Equal(t, test.matchedHead, head)
}
}
func buildGloasStateForFlags(t *testing.T, stateSlot, slot primitives.Slot, targetRoot, headRoot, prevRoot []byte, availabilityBit uint8, availabilitySlot primitives.Slot) state.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
blockRoots[0] = targetRoot
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = headRoot
blockRoots[(slot-1)%cfg.SlotsPerHistoricalRoot] = prevRoot
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range stateRoots {
stateRoots[i] = make([]byte, fieldparams.RootLength)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = make([]byte, fieldparams.RootLength)
}
execPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
idx := availabilitySlot % cfg.SlotsPerHistoricalRoot
byteIndex := idx / 8
bitIndex := idx % 8
if availabilityBit == 1 {
execPayloadAvailability[byteIndex] |= 1 << bitIndex
}
checkpointRoot := bytes.Repeat([]byte{0xDD}, fieldparams.RootLength)
justified := &ethpb.Checkpoint{Root: checkpointRoot}
stProto := &ethpb.BeaconStateGloas{
Slot: stateSlot,
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
ExecutionPayloadAvailability: execPayloadAvailability,
CurrentJustifiedCheckpoint: justified,
PreviousJustifiedCheckpoint: justified,
Validators: []*ethpb.Validator{
{
EffectiveBalance: cfg.MinActivationBalance,
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
},
},
Balances: []uint64{cfg.MinActivationBalance},
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
Epoch: 0,
},
}
beaconState, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
return beaconState
}

View File

@@ -111,10 +111,21 @@ func VerifyAttestationNoVerifySignature(
var indexedAtt ethpb.IndexedAtt
if att.Version() >= version.Electra {
if att.GetData().CommitteeIndex != 0 {
return errors.New("committee index must be 0 post-Electra")
ci := att.GetData().CommitteeIndex
// Spec v1.7.0-alpha pseudocode:
//
// # [Modified in Gloas:EIP7732]
// assert data.index < 2
//
if beaconState.Version() >= version.Gloas {
if ci >= 2 {
return fmt.Errorf("incorrect committee index %d", ci)
}
} else {
if ci != 0 {
return errors.New("committee index must be 0 between Electra and Gloas forks")
}
}
aggBits := att.GetAggregationBits()
committeeIndices := att.CommitteeBitsVal().BitIndices()
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"bytes"
"context"
"testing"
@@ -262,7 +263,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
CommitteeBits: bitfield.NewBitvector64(),
}
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "committee index must be 0 post-Electra", err)
assert.ErrorContains(t, "committee index must be 0", err)
})
t.Run("index of committee too big", func(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
@@ -314,6 +315,75 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
})
}
func TestVerifyAttestationNoVerifySignature_GloasCommitteeIndexLimit(t *testing.T) {
cfg := params.BeaconConfig()
stateSlot := cfg.MinAttestationInclusionDelay + 1
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range blockRoots {
blockRoots[i] = make([]byte, fieldparams.RootLength)
}
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range stateRoots {
stateRoots[i] = make([]byte, fieldparams.RootLength)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = make([]byte, fieldparams.RootLength)
}
checkpointRoot := bytes.Repeat([]byte{0xAA}, fieldparams.RootLength)
justified := &ethpb.Checkpoint{Epoch: 0, Root: checkpointRoot}
gloasStateProto := &ethpb.BeaconStateGloas{
Slot: stateSlot,
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, fieldparams.RootLength),
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
CurrentJustifiedCheckpoint: justified,
PreviousJustifiedCheckpoint: justified,
Validators: []*ethpb.Validator{
{
EffectiveBalance: cfg.MinActivationBalance,
WithdrawalCredentials: append([]byte{cfg.ETH1AddressWithdrawalPrefixByte}, bytes.Repeat([]byte{0x01}, 31)...),
},
},
Balances: []uint64{cfg.MinActivationBalance},
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2),
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
Epoch: 0,
},
}
beaconState, err := state_native.InitializeFromProtoGloas(gloasStateProto)
require.NoError(t, err)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(0, true)
aggBits := bitfield.NewBitlist(1)
aggBits.SetBitAt(0, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Slot: 0,
CommitteeIndex: 2, // invalid for Gloas (must be <2)
BeaconBlockRoot: blockRoots[0],
Source: justified,
Target: justified,
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
Signature: bytes.Repeat([]byte{0x00}, fieldparams.BLSSignatureLength),
}
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "incorrect committee index 2", err)
}
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
@@ -583,6 +653,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
}
func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing.T) {
helpers.ClearCache()
ctx := t.Context()
numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))
validators := make([]*ethpb.Validator, numOfValidators)

View File

@@ -33,6 +33,8 @@ const (
LightClientOptimisticUpdate
// PayloadAttributes events are fired upon a missed slot or new head.
PayloadAttributes
// PayloadProcessed is sent after a payload envelope has been processed.
PayloadProcessed
)
// BlockProcessedData is the data sent with BlockProcessed events.
@@ -72,3 +74,9 @@ type InitializedData struct {
// GenesisValidatorsRoot represents state.validators.HashTreeRoot().
GenesisValidatorsRoot []byte
}
// PayloadProcessedData is the data sent with PayloadProcessed events.
type PayloadProcessedData struct {
Slot primitives.Slot
BlockRoot [32]byte
}
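A minimal sketch of how the new event type might be fired after envelope processing, following the pattern of the existing state feed events; the method below and the notifier field access are illustrative, not part of this diff.

package blockchain

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
	statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// notifyPayloadProcessed (hypothetical) publishes a PayloadProcessed event
// so subscribers (e.g. the events API) learn that the envelope for the
// given block root has been fully processed.
func (s *Service) notifyPayloadProcessed(slot primitives.Slot, root [32]byte) {
	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.PayloadProcessed,
		Data: &statefeed.PayloadProcessedData{
			Slot:      slot,
			BlockRoot: root,
		},
	})
}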

View File

@@ -3,18 +3,26 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"attestation.go",
"bid.go",
"deposit_request.go",
"log.go",
"payload.go",
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
"upgrade.go",
"withdrawals.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/requests:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
@@ -25,26 +33,37 @@ go_library(
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attestation_test.go",
"bid_test.go",
"deposit_request_test.go",
"payload_attestation_test.go",
"payload_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
"upgrade_test.go",
"withdrawals_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -0,0 +1,52 @@
package gloas
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
)
// MatchingPayload returns true if the attestation's committee index matches the expected payload index.
//
// For pre-Gloas forks, this always returns true.
//
// Spec v1.7.0-alpha (pseudocode):
//
// # [New in Gloas:EIP7732]
// if is_attestation_same_slot(state, data):
// assert data.index == 0
// payload_matches = True
// else:
// slot_index = data.slot % SLOTS_PER_HISTORICAL_ROOT
// payload_index = state.execution_payload_availability[slot_index]
// payload_matches = data.index == payload_index
func MatchingPayload(
beaconState state.ReadOnlyBeaconState,
beaconBlockRoot [32]byte,
slot primitives.Slot,
committeeIndex uint64,
) (bool, error) {
if beaconState.Version() < version.Gloas {
return true, nil
}
sameSlot, err := beaconState.IsAttestationSameSlot(beaconBlockRoot, slot)
if err != nil {
return false, errors.Wrap(err, "failed to get same slot attestation status")
}
if sameSlot {
if committeeIndex != 0 {
return false, fmt.Errorf("committee index %d for same slot attestation must be 0", committeeIndex)
}
return true, nil
}
executionPayloadAvail, err := beaconState.ExecutionPayloadAvailability(slot)
if err != nil {
return false, errors.Wrap(err, "failed to get execution payload availability status")
}
return executionPayloadAvail == committeeIndex, nil
}
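The availability lookup reads one bit per slot out of the packed execution_payload_availability bitvector, indexed by slot % SLOTS_PER_HISTORICAL_ROOT; the tests below build that vector by hand. A standalone sketch of the bit arithmetic (the helper name is illustrative):

package gloas

import "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

// payloadAvailabilityBit (illustrative) extracts the availability bit for a
// slot from the packed execution_payload_availability bitvector.
func payloadAvailabilityBit(bits []byte, slot, slotsPerHistoricalRoot primitives.Slot) uint64 {
	idx := uint64(slot % slotsPerHistoricalRoot)
	return uint64(bits[idx/8]>>(idx%8)) & 1
}

// Setting the bit is the mirror operation, as in the tests below:
//   bits[idx/8] |= 1 << (idx % 8)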

View File

@@ -0,0 +1,110 @@
package gloas
import (
"bytes"
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func buildStateWithBlockRoots(t *testing.T, stateSlot primitives.Slot, roots map[primitives.Slot][]byte) *state_native.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for slot, root := range roots {
blockRoots[slot%cfg.SlotsPerHistoricalRoot] = root
}
stProto := &ethpb.BeaconStateGloas{
Slot: stateSlot,
BlockRoots: blockRoots,
}
state, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
return state.(*state_native.BeaconState)
}
func TestMatchingPayload(t *testing.T) {
t.Run("pre-gloas always true", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
ok, err := MatchingPayload(stIface, [32]byte{}, 0, 123)
require.NoError(t, err)
require.Equal(t, true, ok)
})
t.Run("same slot requires committee index 0", func(t *testing.T) {
root := bytes.Repeat([]byte{0xAA}, 32)
state := buildStateWithBlockRoots(t, 6, map[primitives.Slot][]byte{
4: root,
3: bytes.Repeat([]byte{0xBB}, 32),
})
var rootArr [32]byte
copy(rootArr[:], root)
ok, err := MatchingPayload(state, rootArr, 4, 1)
require.ErrorContains(t, "committee index", err)
require.Equal(t, false, ok)
})
t.Run("same slot matches when committee index is 0", func(t *testing.T) {
root := bytes.Repeat([]byte{0xAA}, 32)
state := buildStateWithBlockRoots(t, 6, map[primitives.Slot][]byte{
4: root,
3: bytes.Repeat([]byte{0xBB}, 32),
})
var rootArr [32]byte
copy(rootArr[:], root)
ok, err := MatchingPayload(state, rootArr, 4, 0)
require.NoError(t, err)
require.Equal(t, true, ok)
})
t.Run("non same slot checks payload availability", func(t *testing.T) {
cfg := params.BeaconConfig()
root := bytes.Repeat([]byte{0xAA}, 32)
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
blockRoots[4%cfg.SlotsPerHistoricalRoot] = bytes.Repeat([]byte{0xCC}, 32)
blockRoots[3%cfg.SlotsPerHistoricalRoot] = bytes.Repeat([]byte{0xBB}, 32)
availability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
slotIndex := uint64(4)
availability[slotIndex/8] = byte(1 << (slotIndex % 8))
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Slot: 6,
BlockRoots: blockRoots,
ExecutionPayloadAvailability: availability,
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x66}, 4),
PreviousVersion: bytes.Repeat([]byte{0x66}, 4),
Epoch: 0,
},
})
require.NoError(t, err)
state := stIface.(*state_native.BeaconState)
require.Equal(t, version.Gloas, state.Version())
var rootArr [32]byte
copy(rootArr[:], root)
ok, err := MatchingPayload(state, rootArr, 4, 1)
require.NoError(t, err)
require.Equal(t, true, ok)
ok, err = MatchingPayload(state, rootArr, 4, 0)
require.NoError(t, err)
require.Equal(t, false, ok)
})
}

View File

@@ -17,27 +17,56 @@ import (
)
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == G2_POINT_AT_INFINITY
// else:
// assert is_active_builder(state, builder_index)
// assert can_builder_cover_bid(state, builder_index, amount)
// assert verify_execution_payload_bid_signature(state, signed_bid)
// assert bid.slot == block.slot
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
// if amount > 0:
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
// state.latest_execution_payload_bid = bid
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
//
// # For self-builds, amount must be zero regardless of withdrawal credential prefix
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
// else:
// # Verify that the builder is active
// assert is_active_builder(state, builder_index)
// # Verify that the builder has funds to cover the bid
// assert can_builder_cover_bid(state, builder_index, amount)
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify commitments are under limit
// assert (
// len(bid.blob_kzg_commitments)
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
// )
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//
// # Record the pending payment if there is some payment
// if amount > 0:
// pending_payment = BuilderPendingPayment(
// weight=0,
// withdrawal=BuilderPendingWithdrawal(
// fee_recipient=bid.fee_recipient,
// amount=amount,
// builder_index=builder_index,
// ),
// )
// state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
// pending_payment
// )
//
// # Cache the signed execution payload bid
// state.latest_execution_payload_bid = bid
// </spec>
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
@@ -86,6 +115,12 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
}
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
commitmentCount := bid.BlobKzgCommitmentCount()
if commitmentCount > uint64(maxBlobsPerBlock) {
return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
}
if err := validateBidConsistency(st, bid, block); err != nil {
return errors.Wrap(err, "bid consistency validation failed")
}

View File

@@ -184,6 +184,28 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
return out
}
func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
if count > max {
count = max
}
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
count := max + 1
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
slot := primitives.Slot(12)
proposerIdx := primitives.ValidatorIndex(0)
@@ -194,17 +216,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -236,16 +258,16 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -280,17 +302,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -341,17 +363,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -394,17 +416,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -436,17 +458,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
// Use an invalid signature.
invalidSig := [96]byte{1}
@@ -463,6 +485,42 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
require.ErrorContains(t, "bid signature validation failed", err)
}
func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
slot := primitives.Slot(9)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
pubKey := [48]byte{}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err := ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "blob KZG commitments over max", err)
}
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
slot := primitives.Slot(10)
builderIdx := primitives.BuilderIndex(1)
@@ -478,17 +536,17 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -520,17 +578,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -563,17 +621,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
parentRoot := bytes.Repeat([]byte{0x22}, 32)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -605,17 +663,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)

View File

@@ -0,0 +1,173 @@
package gloas
import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func processDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) error {
if len(requests) == 0 {
return nil
}
for _, request := range requests {
if err := processDepositRequest(beaconState, request); err != nil {
return errors.Wrap(err, "could not apply deposit request")
}
}
return nil
}
// processDepositRequest processes a single deposit request.
//
// <spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
// # [New in Gloas:EIP7732]
// builder_pubkeys = [b.pubkey for b in state.builders]
// validator_pubkeys = [v.pubkey for v in state.validators]
//
// # [New in Gloas:EIP7732]
// # Regardless of the withdrawal credentials prefix, if a builder/validator
// # already exists with this pubkey, apply the deposit to their balance
// is_builder = deposit_request.pubkey in builder_pubkeys
// is_validator = deposit_request.pubkey in validator_pubkeys
// is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
// if is_builder or (is_builder_prefix and not is_validator):
// # Apply builder deposits immediately
// apply_deposit_for_builder(
// state,
// deposit_request.pubkey,
// deposit_request.withdrawal_credentials,
// deposit_request.amount,
// deposit_request.signature,
// state.slot,
// )
// return
//
// # Add validator deposits to the queue
// state.pending_deposits.append(
// PendingDeposit(
// pubkey=deposit_request.pubkey,
// withdrawal_credentials=deposit_request.withdrawal_credentials,
// amount=deposit_request.amount,
// signature=deposit_request.signature,
// slot=state.slot,
// )
// )
// </spec>
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
if request == nil {
return errors.New("nil deposit request")
}
applied, err := applyBuilderDepositRequest(beaconState, request)
if err != nil {
return errors.Wrap(err, "could not apply builder deposit")
}
if applied {
return nil
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: request.Pubkey,
WithdrawalCredentials: request.WithdrawalCredentials,
Amount: request.Amount,
Signature: request.Signature,
Slot: beaconState.Slot(),
}); err != nil {
return errors.Wrap(err, "could not append deposit request")
}
return nil
}
// <spec fn="apply_deposit_for_builder" fork="gloas" hash="e4bc98c7">
// def apply_deposit_for_builder(
//
// state: BeaconState,
// pubkey: BLSPubkey,
// withdrawal_credentials: Bytes32,
// amount: uint64,
// signature: BLSSignature,
// slot: Slot,
//
// ) -> None:
//
// builder_pubkeys = [b.pubkey for b in state.builders]
// if pubkey not in builder_pubkeys:
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
// add_builder_to_registry(state, pubkey, withdrawal_credentials, amount, slot)
// else:
// # Increase balance by deposit amount
// builder_index = builder_pubkeys.index(pubkey)
// state.builders[builder_index].balance += amount
//
// </spec>
func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (bool, error) {
if beaconState.Version() < version.Gloas {
return false, nil
}
pubkey := bytesutil.ToBytes48(request.Pubkey)
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
idx, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
isBuilderPrefix := helpers.IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
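// De Morgan inversion of the spec's "is_builder or (is_builder_prefix and not is_validator)": return early when neither branch applies.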
if !isBuilder && (!isBuilderPrefix || isValidator) {
return false, nil
}
if isBuilder {
if err := beaconState.IncreaseBuilderBalance(idx, request.Amount); err != nil {
return false, err
}
return true, nil
}
if err := applyDepositForNewBuilder(
beaconState,
request.Pubkey,
request.WithdrawalCredentials,
request.Amount,
request.Signature,
); err != nil {
return false, err
}
return true, nil
}
func applyDepositForNewBuilder(
beaconState state.BeaconState,
pubkey []byte,
withdrawalCredentials []byte,
amount uint64,
signature []byte,
) error {
pubkeyBytes := bytesutil.ToBytes48(pubkey)
valid, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
PublicKey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: signature,
})
if err != nil {
return errors.Wrap(err, "could not verify deposit signature")
}
if !valid {
log.WithFields(logrus.Fields{
"pubkey": fmt.Sprintf("%x", pubkey),
}).Warn("ignoring builder deposit: invalid signature")
return nil
}
withdrawalCredBytes := bytesutil.ToBytes32(withdrawalCredentials)
return beaconState.AddBuilderFromDeposit(pubkeyBytes, withdrawalCredBytes, amount)
}

View File

@@ -0,0 +1,150 @@
package gloas
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProcessDepositRequests_EmptyAndNil(t *testing.T) {
st := newGloasState(t, nil, nil)
t.Run("empty requests continues", func(t *testing.T) {
err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
require.NoError(t, err)
})
t.Run("nil request errors", func(t *testing.T) {
err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
require.ErrorContains(t, "nil deposit request", err)
})
}
func TestProcessDepositRequest_BuilderDepositAddsBuilder(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
cred := builderWithdrawalCredentials()
pd := stateTesting.GeneratePendingDeposit(t, sk, 1234, cred, 0)
req := depositRequestFromPending(pd, 1)
st := newGloasState(t, nil, nil)
err = processDepositRequest(st, req)
require.NoError(t, err)
idx, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
require.Equal(t, true, ok)
builder, err := st.Builder(idx)
require.NoError(t, err)
require.NotNil(t, builder)
require.DeepEqual(t, req.Pubkey, builder.Pubkey)
require.DeepEqual(t, []byte{cred[0]}, builder.Version)
require.DeepEqual(t, cred[12:], builder.ExecutionAddress)
require.Equal(t, uint64(1234), uint64(builder.Balance))
require.Equal(t, params.BeaconConfig().FarFutureEpoch, builder.WithdrawableEpoch)
pending, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pending))
}
func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
pubkey := sk.PublicKey().Marshal()
builders := []*ethpb.Builder{
{
Pubkey: pubkey,
Version: []byte{0},
ExecutionAddress: bytes.Repeat([]byte{0x11}, 20),
Balance: 5,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
st := newGloasState(t, nil, builders)
cred := validatorWithdrawalCredentials()
pd := stateTesting.GeneratePendingDeposit(t, sk, 200, cred, 0)
req := depositRequestFromPending(pd, 9)
err = processDepositRequest(st, req)
require.NoError(t, err)
idx, ok := st.BuilderIndexByPubkey(toBytes48(pubkey))
require.Equal(t, true, ok)
builder, err := st.Builder(idx)
require.NoError(t, err)
require.Equal(t, uint64(205), uint64(builder.Balance))
pending, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pending))
}
func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
cred := builderWithdrawalCredentials()
st := newGloasState(t, nil, nil)
err = applyDepositForNewBuilder(st, sk.PublicKey().Marshal(), cred[:], 100, make([]byte, 96))
require.NoError(t, err)
_, ok := st.BuilderIndexByPubkey(toBytes48(sk.PublicKey().Marshal()))
require.Equal(t, false, ok)
}
func newGloasState(t *testing.T, validators []*ethpb.Validator, builders []*ethpb.Builder) state.BeaconState {
t.Helper()
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
Validators: validators,
Balances: make([]uint64, len(validators)),
PendingDeposits: []*ethpb.PendingDeposit{},
Builders: builders,
})
require.NoError(t, err)
return st
}
func depositRequestFromPending(pd *ethpb.PendingDeposit, index uint64) *enginev1.DepositRequest {
return &enginev1.DepositRequest{
Pubkey: pd.PublicKey,
WithdrawalCredentials: pd.WithdrawalCredentials,
Amount: pd.Amount,
Signature: pd.Signature,
Index: index,
}
}
func builderWithdrawalCredentials() [32]byte {
var cred [32]byte
cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
copy(cred[12:], bytes.Repeat([]byte{0x22}, 20))
return cred
}
func validatorWithdrawalCredentials() [32]byte {
var cred [32]byte
cred[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
copy(cred[12:], bytes.Repeat([]byte{0x33}, 20))
return cred
}
func toBytes48(b []byte) [48]byte {
var out [48]byte
copy(out[:], b)
return out
}

View File

@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package gloas
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/gloas")

View File

@@ -0,0 +1,377 @@
package gloas
import (
"bytes"
"context"
"fmt"
requests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
//
// <spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
// def process_execution_payload(
// state: BeaconState,
// # [Modified in Gloas:EIP7732]
// # Removed `body`
// # [New in Gloas:EIP7732]
// signed_envelope: SignedExecutionPayloadEnvelope,
// execution_engine: ExecutionEngine,
// # [New in Gloas:EIP7732]
// verify: bool = True,
// ) -> None:
// envelope = signed_envelope.message
// payload = envelope.payload
//
// # Verify signature
// if verify:
// assert verify_execution_payload_envelope_signature(state, signed_envelope)
//
// # Cache latest block header state root
// previous_state_root = hash_tree_root(state)
// if state.latest_block_header.state_root == Root():
// state.latest_block_header.state_root = previous_state_root
//
// # Verify consistency with the beacon block
// assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
// assert envelope.slot == state.slot
//
// # Verify consistency with the committed bid
// committed_bid = state.latest_execution_payload_bid
// assert envelope.builder_index == committed_bid.builder_index
// assert committed_bid.prev_randao == payload.prev_randao
//
// # Verify consistency with expected withdrawals
// assert hash_tree_root(payload.withdrawals) == hash_tree_root(state.payload_expected_withdrawals)
//
// # Verify the gas_limit
// assert committed_bid.gas_limit == payload.gas_limit
// # Verify the block hash
// assert committed_bid.block_hash == payload.block_hash
// # Verify consistency of the parent hash with respect to the previous execution payload
// assert payload.parent_hash == state.latest_block_hash
// # Verify timestamp
// assert payload.timestamp == compute_time_at_slot(state, state.slot)
// # Verify the execution payload is valid
// versioned_hashes = [
// kzg_commitment_to_versioned_hash(commitment)
// # [Modified in Gloas:EIP7732]
// for commitment in committed_bid.blob_kzg_commitments
// ]
// requests = envelope.execution_requests
// assert execution_engine.verify_and_notify_new_payload(
// NewPayloadRequest(
// execution_payload=payload,
// versioned_hashes=versioned_hashes,
// parent_beacon_block_root=state.latest_block_header.parent_root,
// execution_requests=requests,
// )
// )
//
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
// for operation in operations:
// fn(state, operation)
//
// for_ops(requests.deposits, process_deposit_request)
// for_ops(requests.withdrawals, process_withdrawal_request)
// for_ops(requests.consolidations, process_consolidation_request)
//
// # Queue the builder payment
// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
// amount = payment.withdrawal.amount
// if amount > 0:
// state.builder_pending_withdrawals.append(payment.withdrawal)
// state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = (
// BuilderPendingPayment()
// )
//
// # Cache the execution payload hash
// state.execution_payload_availability[state.slot % SLOTS_PER_HISTORICAL_ROOT] = 0b1
// state.latest_block_hash = payload.block_hash
//
// # Verify the state root
// if verify:
// assert envelope.state_root == hash_tree_root(state)
// </spec>
func ProcessExecutionPayload(
ctx context.Context,
st state.BeaconState,
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) error {
if err := VerifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
return errors.Wrap(err, "signature verification failed")
}
envelope, err := signedEnvelope.Envelope()
if err != nil {
return errors.Wrap(err, "could not get envelope from signed envelope")
}
if err := ApplyExecutionPayload(ctx, st, envelope); err != nil {
return err
}
r, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get hash tree root")
}
if r != envelope.StateRoot() {
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
}
return nil
}
// ApplyExecutionPayload applies the execution payload envelope to the state and performs the same
// consistency checks as the full processing path. This keeps the post-payload state root computation
// on a shared code path, even though some bid/payload checks are not strictly required for the root itself.
func ApplyExecutionPayload(
ctx context.Context,
st state.BeaconState,
envelope interfaces.ROExecutionPayloadEnvelope,
) error {
latestHeader := st.LatestBlockHeader()
if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
previousStateRoot, err := st.HashTreeRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not compute state root")
}
latestHeader.StateRoot = previousStateRoot[:]
if err := st.SetLatestBlockHeader(latestHeader); err != nil {
return errors.Wrap(err, "could not set latest block header")
}
}
blockHeaderRoot, err := latestHeader.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute block header root")
}
beaconBlockRoot := envelope.BeaconBlockRoot()
if !bytes.Equal(beaconBlockRoot[:], blockHeaderRoot[:]) {
return errors.Errorf("envelope beacon block root does not match state latest block header root: envelope=%#x, header=%#x", beaconBlockRoot, blockHeaderRoot)
}
if envelope.Slot() != st.Slot() {
return errors.Errorf("envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
}
latestBid, err := st.LatestExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "could not get latest execution payload bid")
}
if latestBid == nil {
return errors.New("latest execution payload bid is nil")
}
if envelope.BuilderIndex() != latestBid.BuilderIndex() {
return errors.Errorf("envelope builder index does not match committed bid builder index: envelope=%d, bid=%d", envelope.BuilderIndex(), latestBid.BuilderIndex())
}
payload, err := envelope.Execution()
if err != nil {
return errors.Wrap(err, "could not get execution payload from envelope")
}
latestBidPrevRandao := latestBid.PrevRandao()
if !bytes.Equal(payload.PrevRandao(), latestBidPrevRandao[:]) {
return errors.Errorf("payload prev randao does not match committed bid prev randao: payload=%#x, bid=%#x", payload.PrevRandao(), latestBidPrevRandao)
}
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals from payload")
}
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
if err != nil {
return errors.Wrap(err, "could not validate payload withdrawals")
}
if !ok {
return errors.New("payload withdrawals do not match expected withdrawals")
}
if latestBid.GasLimit() != payload.GasLimit() {
return errors.Errorf("committed bid gas limit does not match payload gas limit: bid=%d, payload=%d", latestBid.GasLimit(), payload.GasLimit())
}
bidBlockHash := latestBid.BlockHash()
payloadBlockHash := payload.BlockHash()
if !bytes.Equal(bidBlockHash[:], payloadBlockHash) {
return errors.Errorf("committed bid block hash does not match payload block hash: bid=%#x, payload=%#x", bidBlockHash, payloadBlockHash)
}
latestBlockHash, err := st.LatestBlockHash()
if err != nil {
return errors.Wrap(err, "could not get latest block hash")
}
if !bytes.Equal(payload.ParentHash(), latestBlockHash[:]) {
return errors.Errorf("payload parent hash does not match state latest block hash: payload=%#x, state=%#x", payload.ParentHash(), latestBlockHash)
}
t, err := slots.StartTime(st.GenesisTime(), st.Slot())
if err != nil {
return errors.Wrap(err, "could not compute timestamp")
}
if payload.Timestamp() != uint64(t.Unix()) {
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
}
if err := ApplyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), [32]byte(payload.BlockHash())); err != nil {
return err
}
return nil
}
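The timestamp check above mirrors the spec's compute_time_at_slot: the expected value is simply genesis time plus elapsed slots, which is in effect what slots.StartTime computes (returning an error rather than overflowing). A minimal sketch of the same quantity:

// Expected payload timestamp for the state's slot:
// genesis_time + slot * SECONDS_PER_SLOT.
expected := st.GenesisTime() + uint64(st.Slot())*params.BeaconConfig().SecondsPerSlot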
func ApplyExecutionPayloadStateMutations(
ctx context.Context,
st state.BeaconState,
executionRequests *enginev1.ExecutionRequests,
blockHash [32]byte,
) error {
if err := processExecutionRequests(ctx, st, executionRequests); err != nil {
return errors.Wrap(err, "could not process execution requests")
}
if err := st.QueueBuilderPayment(); err != nil {
return errors.Wrap(err, "could not queue builder payment")
}
if err := st.SetExecutionPayloadAvailability(st.Slot(), true); err != nil {
return errors.Wrap(err, "could not set execution payload availability")
}
if err := st.SetLatestBlockHash(blockHash); err != nil {
return errors.Wrap(err, "could not set latest block hash")
}
return nil
}
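QueueBuilderPayment corresponds to the spec's pending-payment step quoted above: the payment for the current slot lives in the upper half of the two-epoch builder_pending_payments array. A worked example of the index:

slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
// e.g. slot 69 with SLOTS_PER_EPOCH = 32: 32 + 69%32 = 37 (upper half of the array).
paymentIndex := slotsPerEpoch + uint64(st.Slot())%slotsPerEpoch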
func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
return proposerPublicKey(st)
}
return builderPublicKey(st, builderIdx)
}
func proposerPublicKey(st state.BeaconState) (bls.PublicKey, error) {
header := st.LatestBlockHeader()
if header == nil {
return nil, fmt.Errorf("latest block header is nil")
}
proposerPubkey := st.PubkeyAtIndex(header.ProposerIndex)
publicKey, err := bls.PublicKeyFromBytes(proposerPubkey[:])
if err != nil {
return nil, fmt.Errorf("invalid proposer public key: %w", err)
}
return publicKey, nil
}
func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
builder, err := st.Builder(builderIdx)
if err != nil {
return nil, fmt.Errorf("failed to get builder: %w", err)
}
if builder == nil {
return nil, fmt.Errorf("builder at index %d not found", builderIdx)
}
publicKey, err := bls.PublicKeyFromBytes(builder.Pubkey)
if err != nil {
return nil, fmt.Errorf("invalid builder public key: %w", err)
}
return publicKey, nil
}
// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
// Spec v1.7.0-alpha.0 (pseudocode):
// for op in requests.deposits: process_deposit_request(state, op)
// for op in requests.withdrawals: process_withdrawal_request(state, op)
// for op in requests.consolidations: process_consolidation_request(state, op)
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
return errors.Wrap(err, "could not process deposit requests")
}
var err error
st, err = requests.ProcessWithdrawalRequests(ctx, st, rqs.Withdrawals)
if err != nil {
return errors.Wrap(err, "could not process withdrawal requests")
}
err = requests.ProcessConsolidationRequests(ctx, st, rqs.Consolidations)
if err != nil {
return errors.Wrap(err, "could not process consolidation requests")
}
return nil
}
// VerifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
// def verify_execution_payload_envelope_signature(
//
// state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
//
// ) -> bool:
//
// builder_index = signed_envelope.message.builder_index
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// validator_index = state.latest_block_header.proposer_index
// pubkey = state.validators[validator_index].pubkey
// else:
// pubkey = state.builders[builder_index].pubkey
//
// signing_root = compute_signing_root(
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
// )
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
//
// </spec>
func VerifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
envelope, err := signedEnvelope.Envelope()
if err != nil {
return fmt.Errorf("failed to get envelope: %w", err)
}
builderIdx := envelope.BuilderIndex()
publicKey, err := envelopePublicKey(st, builderIdx)
if err != nil {
return err
}
signatureBytes := signedEnvelope.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return fmt.Errorf("invalid signature format: %w", err)
}
currentEpoch := slots.ToEpoch(envelope.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return fmt.Errorf("failed to compute signing domain: %w", err)
}
signingRoot, err := signedEnvelope.SigningRoot(domain)
if err != nil {
return fmt.Errorf("failed to compute signing root: %w", err)
}
if !signature.Verify(publicKey, signingRoot[:]) {
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
}
return nil
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
stderrors "errors"
"fmt"
"slices"
@@ -23,15 +24,24 @@ import (
"github.com/pkg/errors"
)
var ErrValidatorNotInPTC = stderrors.New("validator not in PTC")
// ProcessPayloadAttestations validates payload attestations in a block body.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
//
// data = payload_attestation.data
// assert data.beacon_block_root == state.latest_block_header.parent_root
// assert data.slot + 1 == state.slot
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed)
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
// def process_payload_attestation(
// state: BeaconState, payload_attestation: PayloadAttestation
// ) -> None:
// data = payload_attestation.data
//
// # Check that the attestation is for the parent beacon block
// assert data.beacon_block_root == state.latest_block_header.parent_root
// # Check that the attestation is for the previous slot
// assert data.slot + 1 == state.slot
// # Verify signature
// indexed_payload_attestation = get_indexed_payload_attestation(state, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
// </spec>
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
atts, err := body.PayloadAttestations()
if err != nil {
@@ -70,7 +80,7 @@ func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body
// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
committee, err := payloadCommittee(ctx, st, att.Data.Slot)
committee, err := PayloadCommittee(ctx, st, att.Data.Slot)
if err != nil {
return nil, err
}
@@ -89,19 +99,26 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}, nil
}
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
// Spec v1.7.0-alpha.0 (pseudocode):
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// PayloadCommittee returns the payload timeliness committee for the given slot and state.
//
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices = []
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// """
// Get the payload timeliness committee for the given ``slot``.
// """
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices: List[ValidatorIndex] = []
// # Concatenate all committees for this slot in order
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
if err != nil {
@@ -114,17 +131,50 @@ func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
}
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
if err != nil {
return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
selected := make([]primitives.ValidatorIndex, 0, fieldparams.PTCSize)
var i uint64
for uint64(len(selected)) < fieldparams.PTCSize {
if ctx.Err() != nil {
return nil, ctx.Err()
}
for committeeIndex := primitives.CommitteeIndex(0); committeeIndex < primitives.CommitteeIndex(committeesPerSlot); committeeIndex++ {
if uint64(len(selected)) >= fieldparams.PTCSize {
break
}
committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, committeeIndex)
if err != nil {
return nil, errors.Wrapf(err, "failed to get beacon committee %d", committeeIndex)
}
selected, i, err = selectByBalanceFill(ctx, st, committee, seed, selected, i)
if err != nil {
return nil, errors.Wrapf(err, "failed to sample beacon committee %d", committeeIndex)
}
}
out = append(out, committee...)
}
return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
return selected, nil
}
// PayloadCommitteeIndex returns the validator's index position in the payload committee for a slot.
func PayloadCommitteeIndex(
ctx context.Context,
st state.ReadOnlyBeaconState,
slot primitives.Slot,
validatorIndex primitives.ValidatorIndex,
) (uint64, error) {
ptc, err := PayloadCommittee(ctx, st, slot)
if err != nil {
return 0, err
}
idx := slices.Index(ptc, validatorIndex)
if idx == -1 {
return 0, fmt.Errorf("%w: validator=%d slot=%d", ErrValidatorNotInPTC, validatorIndex, slot)
}
return uint64(idx), nil
}
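Because the not-found case wraps ErrValidatorNotInPTC via %w, callers can distinguish "not a PTC member" from a genuine failure; a usage sketch (ctx, st, slot, and valIdx are illustrative):

pos, err := PayloadCommitteeIndex(ctx, st, slot, valIdx)
switch {
case errors.Is(err, ErrValidatorNotInPTC):
	// Not in the PTC for this slot: no payload attestation duty.
case err != nil:
	return err // committee computation failed
}
_ = pos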
// ptcSeed computes the seed for the payload timeliness committee.
@@ -137,56 +187,87 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
}
// selectByBalance selects a balance-weighted subset of input candidates.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
// Note: shuffle_indices is false for PTC.
//
// total = len(indices); selected = []; i = 0
// while len(selected) < size:
// next = i % total
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
if len(candidates) == 0 {
return nil, errors.New("no candidates for balance weighted selection")
}
// <spec fn="compute_balance_weighted_selection" fork="gloas" hash="2c9f1c23">
// def compute_balance_weighted_selection(
// state: BeaconState,
// indices: Sequence[ValidatorIndex],
// seed: Bytes32,
// size: uint64,
// shuffle_indices: bool,
// ) -> Sequence[ValidatorIndex]:
// """
// Return ``size`` indices sampled by effective balance, using ``indices``
// as candidates. If ``shuffle_indices`` is ``True``, candidate indices
// are themselves sampled from ``indices`` by shuffling it, otherwise
// ``indices`` is traversed in order.
// """
// total = uint64(len(indices))
// assert total > 0
// selected: List[ValidatorIndex] = []
// i = uint64(0)
// while len(selected) < size:
// next_index = i % total
// if shuffle_indices:
// next_index = compute_shuffled_index(next_index, total, seed)
// candidate_index = indices[next_index]
// if compute_balance_weighted_acceptance(state, candidate_index, seed, i):
// selected.append(candidate_index)
// i += 1
// return selected
// </spec>
func selectByBalanceFill(
ctx context.Context,
st state.ReadOnlyBeaconState,
candidates []primitives.ValidatorIndex,
seed [32]byte,
selected []primitives.ValidatorIndex,
i uint64,
) ([]primitives.ValidatorIndex, uint64, error) {
hashFunc := hash.CustomSHA256Hasher()
// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
var buf [40]byte
copy(buf[:], seed[:])
maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
selected := make([]primitives.ValidatorIndex, 0, count)
total := uint64(len(candidates))
for i := uint64(0); uint64(len(selected)) < count; i++ {
for _, idx := range candidates {
if ctx.Err() != nil {
return nil, ctx.Err()
return nil, i, ctx.Err()
}
idx := candidates[i%total]
ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
if err != nil {
return nil, err
return nil, i, err
}
if ok {
selected = append(selected, idx)
}
if uint64(len(selected)) == fieldparams.PTCSize {
break
}
i++
}
return selected, nil
return selected, i, nil
}
// acceptByBalance determines if a validator is accepted based on its effective balance.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_acceptance(state, index, seed, i):
//
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
// <spec fn="compute_balance_weighted_acceptance" fork="gloas" hash="9954dcd0">
// def compute_balance_weighted_acceptance(
// state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64
// ) -> bool:
// """
// Return whether to accept the selection of the validator ``index``, with probability
// proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``.
// """
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
// </spec>
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
@@ -203,16 +284,26 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
}
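The inequality makes acceptance balance-proportional: with the 16-bit random value r drawn from [0, 65535], a candidate is kept with probability effective_balance / MAX_EFFECTIVE_BALANCE_ELECTRA. A worked example:

// For a 32 ETH validator against the 2048 ETH Electra maximum:
//   accept iff 32e9 * 65535 >= 2048e9 * r, i.e. r <= 65535/64 ≈ 1023,
// which is roughly a 1-in-64 chance per sampling round.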
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
// Spec v1.7.0-alpha.0 (pseudocode):
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
//
// indices = indexed_payload_attestation.attesting_indices
// return len(indices) > 0 and indices == sorted(indices) and
// bls.FastAggregateVerify(
// [state.validators[i].pubkey for i in indices],
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
// indexed_payload_attestation.signature,
// )
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
// def is_valid_indexed_payload_attestation(
// state: BeaconState, attestation: IndexedPayloadAttestation
// ) -> bool:
// """
// Check if ``attestation`` is non-empty, has sorted indices, and has
// a valid aggregate signature.
// """
// # Verify indices are non-empty and sorted
// indices = attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(indices):
// return False
//
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
// signing_root = compute_signing_root(attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
// </spec>
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
indices := att.AttestingIndices
if len(indices) == 0 || !slices.IsSorted(indices) {

View File

@@ -0,0 +1,366 @@
package gloas
import (
"bytes"
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/time/slots"
"google.golang.org/protobuf/proto"
)
type payloadFixture struct {
state state.BeaconState
signed interfaces.ROSignedExecutionPayloadEnvelope
signedProto *ethpb.SignedExecutionPayloadEnvelope
envelope *ethpb.ExecutionPayloadEnvelope
payload *enginev1.ExecutionPayloadDeneb
slot primitives.Slot
}
func buildPayloadFixture(t *testing.T, mutate func(payload *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, envelope *ethpb.ExecutionPayloadEnvelope)) payloadFixture {
t.Helper()
cfg := params.BeaconConfig()
slot := primitives.Slot(5)
builderIdx := primitives.BuilderIndex(0)
sk, err := bls.RandKey()
require.NoError(t, err)
pk := sk.PublicKey().Marshal()
randao := bytes.Repeat([]byte{0xAA}, 32)
parentHash := bytes.Repeat([]byte{0xBB}, 32)
blockHash := bytes.Repeat([]byte{0xCC}, 32)
withdrawals := []*enginev1.Withdrawal{
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 0},
}
payload := &enginev1.ExecutionPayloadDeneb{
ParentHash: parentHash,
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
StateRoot: bytes.Repeat([]byte{0x02}, 32),
ReceiptsRoot: bytes.Repeat([]byte{0x03}, 32),
LogsBloom: bytes.Repeat([]byte{0x04}, 256),
PrevRandao: randao,
BlockNumber: 1,
GasLimit: 1,
GasUsed: 0,
Timestamp: 100,
ExtraData: []byte{},
BaseFeePerGas: bytes.Repeat([]byte{0x05}, 32),
BlockHash: blockHash,
Transactions: [][]byte{},
Withdrawals: withdrawals,
BlobGasUsed: 0,
ExcessBlobGas: 0,
}
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: parentHash,
ParentBlockRoot: bytes.Repeat([]byte{0xDD}, 32),
BlockHash: blockHash,
PrevRandao: randao,
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
FeeRecipient: bytes.Repeat([]byte{0xEE}, 20),
}
header := &ethpb.BeaconBlockHeader{
Slot: slot,
ParentRoot: bytes.Repeat([]byte{0x11}, 32),
StateRoot: bytes.Repeat([]byte{0x22}, 32),
BodyRoot: bytes.Repeat([]byte{0x33}, 32),
}
headerRoot, err := header.HashTreeRoot()
require.NoError(t, err)
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: slot,
BuilderIndex: builderIdx,
BeaconBlockRoot: headerRoot[:],
Payload: payload,
ExecutionRequests: &enginev1.ExecutionRequests{},
}
if mutate != nil {
mutate(payload, bid, envelope)
}
genesisRoot := bytes.Repeat([]byte{0xAB}, 32)
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range blockRoots {
blockRoots[i] = bytes.Repeat([]byte{0x44}, 32)
stateRoots[i] = bytes.Repeat([]byte{0x55}, 32)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = randao
}
withdrawalCreds := make([]byte, 32)
withdrawalCreds[0] = cfg.ETH1AddressWithdrawalPrefixByte
eth1Data := &ethpb.Eth1Data{
DepositRoot: bytes.Repeat([]byte{0x66}, 32),
DepositCount: 0,
BlockHash: bytes.Repeat([]byte{0x77}, 32),
}
vals := []*ethpb.Validator{
{
PublicKey: pk,
WithdrawalCredentials: withdrawalCreds,
EffectiveBalance: cfg.MinActivationBalance + 1_000,
},
}
balances := []uint64{cfg.MinActivationBalance + 1_000}
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
for i := range payments {
payments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}
executionPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
builders := make([]*ethpb.Builder, builderIdx+1)
builders[builderIdx] = &ethpb.Builder{
Pubkey: pk,
Version: []byte{0},
ExecutionAddress: bytes.Repeat([]byte{0x09}, 20),
Balance: 0,
DepositEpoch: 0,
WithdrawableEpoch: 0,
}
genesisTime := uint64(0)
slotSeconds := cfg.SecondsPerSlot * uint64(slot)
if payload.Timestamp > slotSeconds {
genesisTime = payload.Timestamp - slotSeconds
}
stProto := &ethpb.BeaconStateGloas{
Slot: slot,
GenesisTime: genesisTime,
GenesisValidatorsRoot: genesisRoot,
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
Epoch: 0,
},
LatestBlockHeader: header,
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
Eth1Data: eth1Data,
Validators: vals,
Balances: balances,
LatestBlockHash: payload.ParentHash,
LatestExecutionPayloadBid: bid,
BuilderPendingPayments: payments,
ExecutionPayloadAvailability: executionPayloadAvailability,
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
PayloadExpectedWithdrawals: payload.Withdrawals,
Builders: builders,
}
st, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
expected := st.Copy()
ctx := context.Background()
require.NoError(t, processExecutionRequests(ctx, expected, envelope.ExecutionRequests))
require.NoError(t, expected.QueueBuilderPayment())
require.NoError(t, expected.SetExecutionPayloadAvailability(slot, true))
var blockHashArr [32]byte
copy(blockHashArr[:], payload.BlockHash)
require.NoError(t, expected.SetLatestBlockHash(blockHashArr))
expectedRoot, err := expected.HashTreeRoot(ctx)
require.NoError(t, err)
envelope.StateRoot = expectedRoot[:]
epoch := slots.ToEpoch(slot)
domain, err := signing.Domain(st.Fork(), epoch, cfg.DomainBeaconBuilder, st.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(envelope, domain)
require.NoError(t, err)
signature := sk.Sign(signingRoot[:]).Marshal()
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: envelope,
Signature: signature,
}
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
return payloadFixture{
state: st,
signed: signed,
signedProto: signedProto,
envelope: envelope,
payload: payload,
slot: slot,
}
}
func TestProcessExecutionPayload_Success(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
require.NoError(t, ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
var expectedHash [32]byte
copy(expectedHash[:], fixture.payload.BlockHash)
require.Equal(t, expectedHash, latestHash)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
payments, err := fixture.state.BuilderPendingPayments()
require.NoError(t, err)
payment := payments[paymentIndex]
require.NotNil(t, payment)
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestApplyExecutionPayloadStateMutations_UpdatesAvailabilityAndLatestHash(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
newHash := [32]byte{}
newHash[0] = 0x99
require.NoError(t, ApplyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
latestHash, err := fixture.state.LatestBlockHash()
require.NoError(t, err)
require.Equal(t, newHash, latestHash)
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
require.NoError(t, err)
require.Equal(t, uint64(1), available)
}
func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
})
err := ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed)
require.ErrorContains(t, "prev randao", err)
}
func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
require.NoError(t, fixture.state.QueueBuilderPayment())
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
payments, err := fixture.state.BuilderPendingPayments()
require.NoError(t, err)
payment := payments[paymentIndex]
require.NotNil(t, payment)
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
fixture := buildPayloadFixture(t, nil)
t.Run("self build", func(t *testing.T) {
proposerSk, err := bls.RandKey()
require.NoError(t, err)
proposerPk := proposerSk.PublicKey().Marshal()
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
stPb.Validators[0].PublicKey = proposerPk
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
require.NoError(t, err)
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
epoch := slots.ToEpoch(msg.Slot)
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(msg, domain)
require.NoError(t, err)
signature := proposerSk.Sign(signingRoot[:]).Marshal()
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: msg,
Signature: signature,
}
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(st, signed))
})
t.Run("builder", func(t *testing.T) {
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
require.NoError(t, err)
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
})
t.Run("invalid signature", func(t *testing.T) {
t.Run("self build", func(t *testing.T) {
proposerSk, err := bls.RandKey()
require.NoError(t, err)
proposerPk := proposerSk.PublicKey().Marshal()
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
stPb.Validators[0].PublicKey = proposerPk
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
require.NoError(t, err)
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: msg,
Signature: bytes.Repeat([]byte{0xFF}, 96),
}
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = VerifyExecutionPayloadEnvelopeSignature(st, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
t.Run("builder", func(t *testing.T) {
signedProto := &ethpb.SignedExecutionPayloadEnvelope{
Message: fixture.signedProto.Message,
Signature: bytes.Repeat([]byte{0xFF}, 96),
}
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
require.NoError(t, err)
err = VerifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
require.ErrorContains(t, "invalid signature format", err)
})
})
}

View File

@@ -10,17 +10,21 @@ import (
)
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
// <spec fn="process_builder_pending_payments" fork="gloas" hash="10da48dd">
// def process_builder_pending_payments(state: BeaconState) -> None:
// """
// Processes the builder pending payments from the previous epoch.
// """
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
//
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
// </spec>
func ProcessBuilderPendingPayments(state state.BeaconState) error {
quorum, err := builderQuorumThreshold(state)
if err != nil {
@@ -53,12 +57,16 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
}
// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
// <spec fn="get_builder_payment_quorum_threshold" fork="gloas" hash="a64b7ffb">
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
// """
// Calculate the quorum threshold for builder payments.
// """
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
// </spec>
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
activeBalance, err := helpers.TotalActiveBalance(state)
if err != nil {

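For intuition on get_builder_payment_quorum_threshold: the quorum is a fixed fraction of one slot's share of total active balance. A sketch, where numerator and denominator are hypothetical stand-ins for the config's BUILDER_PAYMENT_THRESHOLD_NUMERATOR and BUILDER_PAYMENT_THRESHOLD_DENOMINATOR (concrete values not quoted here):

// One slot's share of total active balance, scaled by the threshold fraction.
perSlotBalance := totalActiveBalance / uint64(params.BeaconConfig().SlotsPerEpoch)
quorum := perSlotBalance * numerator / denominator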
View File

@@ -11,16 +11,20 @@ import (
)
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// <spec fn="process_proposer_slashing" fork="gloas" lines="22-32" hash="4da721ef">
// # [New in Gloas:EIP7732]
// # Remove the BuilderPendingPayment corresponding to
// # this proposal if it is still in the 2-epoch window.
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// </spec>
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)

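The two-branch window quoted above maps a slashed proposal onto the correct half of the payments array: current-epoch proposals clear the upper half, previous-epoch proposals the lower half. A sketch of the index selection (the genesis-epoch edge case is elided):

slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
var paymentIndex primitives.Slot
switch slots.ToEpoch(header.Slot) {
case currentEpoch:
	paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch // upper half
case currentEpoch - 1:
	paymentIndex = header.Slot % slotsPerEpoch // lower half
}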
View File

@@ -0,0 +1,306 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
// UpgradeToGloas upgrades a generic beacon state to a Gloas-fork state.
//
// <spec fn="upgrade_to_gloas" fork="gloas" hash="6e66df25">
// def upgrade_to_gloas(pre: fulu.BeaconState) -> BeaconState:
// epoch = fulu.get_current_epoch(pre)
//
// post = BeaconState(
// genesis_time=pre.genesis_time,
// genesis_validators_root=pre.genesis_validators_root,
// slot=pre.slot,
// fork=Fork(
// previous_version=pre.fork.current_version,
// # [Modified in Gloas:EIP7732]
// current_version=GLOAS_FORK_VERSION,
// epoch=epoch,
// ),
// latest_block_header=pre.latest_block_header,
// block_roots=pre.block_roots,
// state_roots=pre.state_roots,
// historical_roots=pre.historical_roots,
// eth1_data=pre.eth1_data,
// eth1_data_votes=pre.eth1_data_votes,
// eth1_deposit_index=pre.eth1_deposit_index,
// validators=pre.validators,
// balances=pre.balances,
// randao_mixes=pre.randao_mixes,
// slashings=pre.slashings,
// previous_epoch_participation=pre.previous_epoch_participation,
// current_epoch_participation=pre.current_epoch_participation,
// justification_bits=pre.justification_bits,
// previous_justified_checkpoint=pre.previous_justified_checkpoint,
// current_justified_checkpoint=pre.current_justified_checkpoint,
// finalized_checkpoint=pre.finalized_checkpoint,
// inactivity_scores=pre.inactivity_scores,
// current_sync_committee=pre.current_sync_committee,
// next_sync_committee=pre.next_sync_committee,
// # [Modified in Gloas:EIP7732]
// # Removed `latest_execution_payload_header`
// # [New in Gloas:EIP7732]
// latest_execution_payload_bid=ExecutionPayloadBid(
// block_hash=pre.latest_execution_payload_header.block_hash,
// ),
// next_withdrawal_index=pre.next_withdrawal_index,
// next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
// historical_summaries=pre.historical_summaries,
// deposit_requests_start_index=pre.deposit_requests_start_index,
// deposit_balance_to_consume=pre.deposit_balance_to_consume,
// exit_balance_to_consume=pre.exit_balance_to_consume,
// earliest_exit_epoch=pre.earliest_exit_epoch,
// consolidation_balance_to_consume=pre.consolidation_balance_to_consume,
// earliest_consolidation_epoch=pre.earliest_consolidation_epoch,
// pending_deposits=pre.pending_deposits,
// pending_partial_withdrawals=pre.pending_partial_withdrawals,
// pending_consolidations=pre.pending_consolidations,
// proposer_lookahead=pre.proposer_lookahead,
// # [New in Gloas:EIP7732]
// builders=[],
// # [New in Gloas:EIP7732]
// next_withdrawal_builder_index=BuilderIndex(0),
// # [New in Gloas:EIP7732]
// execution_payload_availability=[0b1 for _ in range(SLOTS_PER_HISTORICAL_ROOT)],
// # [New in Gloas:EIP7732]
// builder_pending_payments=[BuilderPendingPayment() for _ in range(2 * SLOTS_PER_EPOCH)],
// # [New in Gloas:EIP7732]
// builder_pending_withdrawals=[],
// # [New in Gloas:EIP7732]
// latest_block_hash=pre.latest_execution_payload_header.block_hash,
// # [New in Gloas:EIP7732]
// payload_expected_withdrawals=[],
// )
//
// # [New in Gloas:EIP7732]
// onboard_builders_from_pending_deposits(post)
//
// return post
// </spec>
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
//
// # For self-builds, amount must be zero regardless of withdrawal credential prefix
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
// else:
// # Verify that the builder is active
// assert is_active_builder(state, builder_index)
// # Verify that the builder has funds to cover the bid
// assert can_builder_cover_bid(state, builder_index, amount)
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify commitments are under limit
// assert (
// len(bid.blob_kzg_commitments)
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
// )
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//
// # Record the pending payment if there is some payment
// if amount > 0:
// pending_payment = BuilderPendingPayment(
// weight=0,
// withdrawal=BuilderPendingWithdrawal(
// fee_recipient=bid.fee_recipient,
// amount=amount,
// builder_index=builder_index,
// ),
// )
// state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
// pending_payment
// )
//
// # Cache the signed execution payload bid
// state.latest_execution_payload_bid = bid
// </spec>
func UpgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
s, err := upgradeToGloas(beaconState)
if err != nil {
return nil, errors.Wrap(err, "could not convert to gloas")
}
if err := s.OnboardBuildersFromPendingDeposits(); err != nil {
return nil, errors.Wrap(err, "failed to onboard builders from pending deposits")
}
return s, nil
}
func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := beaconState.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
wi, err := beaconState.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := beaconState.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := beaconState.HistoricalSummaries()
if err != nil {
return nil, err
}
depositRequestsStartIndex, err := beaconState.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
depositBalanceToConsume, err := beaconState.DepositBalanceToConsume()
if err != nil {
return nil, err
}
exitBalanceToConsume, err := beaconState.ExitBalanceToConsume()
if err != nil {
return nil, err
}
earliestExitEpoch, err := beaconState.EarliestExitEpoch()
if err != nil {
return nil, err
}
consolidationBalanceToConsume, err := beaconState.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
earliestConsolidationEpoch, err := beaconState.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pendingDeposits, err := beaconState.PendingDeposits()
if err != nil {
return nil, err
}
pendingPartialWithdrawals, err := beaconState.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pendingConsolidations, err := beaconState.PendingConsolidations()
if err != nil {
return nil, err
}
proposerLookahead, err := beaconState.ProposerLookahead()
if err != nil {
return nil, err
}
proposerLookaheadU64 := make([]uint64, len(proposerLookahead))
for i, v := range proposerLookahead {
proposerLookaheadU64[i] = uint64(v)
}
executionPayloadAvailability := make([]byte, int((params.BeaconConfig().SlotsPerHistoricalRoot+7)/8))
for i := range executionPayloadAvailability {
executionPayloadAvailability[i] = 0xff
}
builderPendingPayments := make([]*ethpb.BuilderPendingPayment, int(params.BeaconConfig().SlotsPerEpoch*2))
for i := range builderPendingPayments {
builderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
},
}
}
s := &ethpb.BeaconStateGloas{
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
Slot: beaconState.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: beaconState.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().GloasForkVersion,
Epoch: time.CurrentEpoch(beaconState),
},
LatestBlockHeader: beaconState.LatestBlockHeader(),
BlockRoots: beaconState.BlockRoots(),
StateRoots: beaconState.StateRoots(),
HistoricalRoots: beaconState.HistoricalRoots(),
Eth1Data: beaconState.Eth1Data(),
Eth1DataVotes: beaconState.Eth1DataVotes(),
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
Validators: beaconState.Validators(),
Balances: beaconState.Balances(),
RandaoMixes: beaconState.RandaoMixes(),
Slashings: beaconState.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: beaconState.JustificationBits(),
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: payloadHeader.BlockHash(),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
ParentBlockHash: make([]byte, fieldparams.RootLength),
ParentBlockRoot: make([]byte, fieldparams.RootLength),
PrevRandao: make([]byte, fieldparams.RootLength),
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
DepositRequestsStartIndex: depositRequestsStartIndex,
DepositBalanceToConsume: depositBalanceToConsume,
ExitBalanceToConsume: exitBalanceToConsume,
EarliestExitEpoch: earliestExitEpoch,
ConsolidationBalanceToConsume: consolidationBalanceToConsume,
EarliestConsolidationEpoch: earliestConsolidationEpoch,
PendingDeposits: pendingDeposits,
PendingPartialWithdrawals: pendingPartialWithdrawals,
PendingConsolidations: pendingConsolidations,
ProposerLookahead: proposerLookaheadU64,
Builders: []*ethpb.Builder{},
NextWithdrawalBuilderIndex: primitives.BuilderIndex(0),
ExecutionPayloadAvailability: executionPayloadAvailability,
BuilderPendingPayments: builderPendingPayments,
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
LatestBlockHash: payloadHeader.BlockHash(),
PayloadExpectedWithdrawals: []*enginev1.Withdrawal{},
}
return state_native.InitializeFromProtoUnsafeGloas(s)
}

View File

@@ -0,0 +1,177 @@
package gloas_test
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
func TestUpgradeToGloas_Basic(t *testing.T) {
st, _ := util.DeterministicGenesisStateFulu(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetHistoricalRoots([][]byte{{1}}))
lookaheadSize := int(params.BeaconConfig().MinSeedLookahead+1) * int(params.BeaconConfig().SlotsPerEpoch)
lookahead := make([]primitives.ValidatorIndex, lookaheadSize)
for i := range lookahead {
lookahead[i] = primitives.ValidatorIndex(i)
}
require.NoError(t, st.SetProposerLookahead(lookahead))
require.NoError(t, st.SetPendingPartialWithdrawals([]*ethpb.PendingPartialWithdrawal{{Index: 1, Amount: 2}}))
require.NoError(t, st.SetPendingConsolidations([]*ethpb.PendingConsolidation{{SourceIndex: 3, TargetIndex: 4}}))
blockHash := bytes.Repeat([]byte{0xAB}, 32)
header := &enginev1.ExecutionPayloadHeaderDeneb{BlockHash: blockHash}
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeaderDeneb(header)
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))
preForkState := st.Copy()
mSt, err := gloas.UpgradeToGloas(st)
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().GloasForkVersion,
Epoch: time.CurrentEpoch(st),
}, mSt.Fork())
bid, err := mSt.LatestExecutionPayloadBid()
require.NoError(t, err)
wantBlockHash := [32]byte{}
copy(wantBlockHash[:], blockHash)
require.DeepSSZEqual(t, wantBlockHash, bid.BlockHash())
require.DeepSSZEqual(t, [20]byte{}, bid.FeeRecipient())
require.DeepSSZEqual(t, [32]byte{}, bid.ParentBlockHash())
require.DeepSSZEqual(t, [32]byte{}, bid.ParentBlockRoot())
require.DeepSSZEqual(t, [32]byte{}, bid.PrevRandao())
latestBlockHash, err := mSt.LatestBlockHash()
require.NoError(t, err)
require.DeepSSZEqual(t, blockHash, latestBlockHash[:])
pbState, ok := mSt.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
expectedAvailLen := int((params.BeaconConfig().SlotsPerHistoricalRoot + 7) / 8)
require.Equal(t, expectedAvailLen, len(pbState.ExecutionPayloadAvailability))
for _, b := range pbState.ExecutionPayloadAvailability {
require.Equal(t, byte(0xff), b)
}
require.Equal(t, 0, len(pbState.Builders))
require.Equal(t, primitives.BuilderIndex(0), pbState.NextWithdrawalBuilderIndex)
require.Equal(t, 0, len(pbState.BuilderPendingWithdrawals))
require.Equal(t, 0, len(pbState.PayloadExpectedWithdrawals))
require.Equal(t, int(params.BeaconConfig().SlotsPerEpoch*2), len(pbState.BuilderPendingPayments))
for _, payment := range pbState.BuilderPendingPayments {
require.NotNil(t, payment)
require.NotNil(t, payment.Withdrawal)
require.Equal(t, fieldparams.FeeRecipientLength, len(payment.Withdrawal.FeeRecipient))
}
ppw, err := mSt.PendingPartialWithdrawals()
require.NoError(t, err)
prePPW, err := preForkState.PendingPartialWithdrawals()
require.NoError(t, err)
require.DeepSSZEqual(t, prePPW, ppw)
pc, err := mSt.PendingConsolidations()
require.NoError(t, err)
prePC, err := preForkState.PendingConsolidations()
require.NoError(t, err)
require.DeepSSZEqual(t, prePC, pc)
}
func TestUpgradeToGloas_OnboardsBuilderDeposit(t *testing.T) {
st, _ := util.DeterministicGenesisStateFulu(t, 4)
sk, err := bls.RandKey()
require.NoError(t, err)
builderCreds := builderWithdrawalCredentials(0xDD)
amount := uint64(1234)
depSlot := primitives.Slot(params.BeaconConfig().SlotsPerEpoch*2 + 3)
deposit := newPendingDeposit(t, sk, builderCreds, amount, depSlot, true)
require.NoError(t, st.SetPendingDeposits([]*ethpb.PendingDeposit{deposit}))
mSt, err := gloas.UpgradeToGloas(st)
require.NoError(t, err)
pbState, ok := mSt.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
require.Equal(t, 0, len(pbState.PendingDeposits))
require.Equal(t, 1, len(pbState.Builders))
builder := pbState.Builders[0]
require.DeepSSZEqual(t, sk.PublicKey().Marshal(), builder.Pubkey)
require.DeepSSZEqual(t, builderCreds[12:], builder.ExecutionAddress)
require.Equal(t, primitives.Gwei(amount), builder.Balance)
require.Equal(t, slots.ToEpoch(depSlot), builder.DepositEpoch)
}
func builderWithdrawalCredentials(addrByte byte) []byte {
wc := make([]byte, fieldparams.RootLength)
wc[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
for i := 12; i < len(wc); i++ {
wc[i] = addrByte
}
return wc
}
func newPendingDeposit(
t *testing.T,
sk bls.SecretKey,
withdrawalCredentials []byte,
amount uint64,
slot primitives.Slot,
valid bool,
) *ethpb.PendingDeposit {
t.Helper()
signature := make([]byte, fieldparams.BLSSignatureLength)
if valid {
signature = signDeposit(t, sk, withdrawalCredentials, amount)
}
return &ethpb.PendingDeposit{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: signature,
Slot: slot,
}
}
func signDeposit(t *testing.T, sk bls.SecretKey, withdrawalCredentials []byte, amount uint64) []byte {
t.Helper()
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
msg := &ethpb.DepositMessage{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
}
signingRoot, err := signing.ComputeSigningRoot(msg, domain)
require.NoError(t, err)
sig := sk.Sign(signingRoot[:])
return sig.Marshal()
}

View File

@@ -0,0 +1,105 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)
// ProcessWithdrawals applies withdrawals to the state for Gloas.
//
// <spec fn="process_withdrawals" fork="gloas" hash="16d9ad2a">
// def process_withdrawals(
//
// state: BeaconState,
// # [Modified in Gloas:EIP7732]
// # Removed `payload`
//
// ) -> None:
//
// # [New in Gloas:EIP7732]
// # Return early if the parent block is empty
// if not is_parent_block_full(state):
// return
//
// # Get expected withdrawals
// expected = get_expected_withdrawals(state)
//
// # Apply expected withdrawals
// apply_withdrawals(state, expected.withdrawals)
//
// # Update withdrawals fields in the state
// update_next_withdrawal_index(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_payload_expected_withdrawals(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_builder_pending_withdrawals(state, expected.processed_builder_withdrawals_count)
// update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
// # [New in Gloas:EIP7732]
// update_next_withdrawal_builder_index(state, expected.processed_builders_sweep_count)
// update_next_withdrawal_validator_index(state, expected.withdrawals)
//
// </spec>
func ProcessWithdrawals(st state.BeaconState) error {
// Must be called before ProcessExecutionPayloadBid for the current block.
full, err := st.IsParentBlockFull()
if err != nil {
return errors.Wrap(err, "could not get parent block full status")
}
if !full {
return nil
}
expected, err := st.ExpectedWithdrawalsGloas()
if err != nil {
return errors.Wrap(err, "could not get expected withdrawals")
}
if err := st.DecreaseWithdrawalBalances(expected.Withdrawals); err != nil {
return errors.Wrap(err, "could not decrease withdrawal balances")
}
if len(expected.Withdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(expected.Withdrawals[len(expected.Withdrawals)-1].Index + 1); err != nil {
return errors.Wrap(err, "could not set next withdrawal index")
}
}
if err := st.SetPayloadExpectedWithdrawals(expected.Withdrawals); err != nil {
return errors.Wrap(err, "could not set payload expected withdrawals")
}
if err := st.DequeueBuilderPendingWithdrawals(expected.ProcessedBuilderWithdrawalsCount); err != nil {
return errors.Wrap(err, "unable to dequeue builder pending withdrawals from state")
}
if err := st.DequeuePendingPartialWithdrawals(expected.ProcessedPartialWithdrawalsCount); err != nil {
return errors.Wrap(err, "unable to dequeue partial withdrawals from state")
}
err = st.SetNextWithdrawalBuilderIndex(expected.NextWithdrawalBuilderIndex)
if err != nil {
return errors.Wrap(err, "could not set next withdrawal builder index")
}
var nextValidatorIndex primitives.ValidatorIndex
if uint64(len(expected.Withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
if err != nil {
return errors.Wrap(err, "could not get next withdrawal validator index")
}
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expected.Withdrawals[len(expected.Withdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
}
}
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
return errors.Wrap(err, "could not set next withdrawal validator index")
}
return nil
}
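// Worked example (illustrative, assuming the mainnet value
// MaxValidatorsPerWithdrawalsSweep = 16384): with 100 validators and a
// partial payload, the sweep cursor advances by the sweep bound modulo the
// validator count, e.g. from index 90 to (90 + 16384) % 100 = 74. A full
// payload instead resumes right after the last withdrawal's validator,
// wrapping to 0 at the validator count.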

View File

@@ -0,0 +1,388 @@
package gloas
import (
"errors"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProcessWithdrawals(t *testing.T) {
cases := []struct {
name string
build func(t *testing.T) *withdrawalsState
check func(t *testing.T, st *withdrawalsState)
}{
{
name: "parent block not full",
build: func(t *testing.T) *withdrawalsState {
return &withdrawalsState{
BeaconState: newGloasState(t, nil, nil),
parentFull: false,
}
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, false, st.expectedCalled)
require.Equal(t, false, st.decreaseCalled)
require.Equal(t, false, st.setNextWithdrawalIndexCalled)
require.Equal(t, false, st.setPayloadExpectedWithdrawalsCalled)
require.Equal(t, false, st.dequeueBuilderCalled)
require.Equal(t, false, st.dequeuePartialCalled)
require.Equal(t, false, st.setNextBuilderIndexCalled)
require.Equal(t, false, st.nextValidatorIndexCalled)
require.Equal(t, false, st.setNextValidatorIndexCalled)
},
},
{
name: "updates indexes when not full payload",
build: func(t *testing.T) *withdrawalsState {
return &withdrawalsState{
BeaconState: newGloasState(t, nil, nil),
parentFull: true,
numValidators: 10,
nextValidatorIndex: 3,
expectedResult: state.ExpectedWithdrawalsGloasResult{
Withdrawals: []*enginev1.Withdrawal{
{Index: 7, ValidatorIndex: 2, Amount: 1, Address: []byte{0x01}},
{Index: 8, ValidatorIndex: 4, Amount: 2, Address: []byte{0x02}},
},
ProcessedBuilderWithdrawalsCount: 5,
ProcessedPartialWithdrawalsCount: 2,
NextWithdrawalBuilderIndex: 7,
},
}
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.expectedCalled)
require.Equal(t, true, st.decreaseCalled)
require.NotNil(t, st.setNextWithdrawalIndexArg)
require.Equal(t, uint64(9), *st.setNextWithdrawalIndexArg)
require.DeepEqual(t, st.expectedResult.Withdrawals, st.setPayloadExpectedWithdrawalsArg)
require.Equal(t, uint64(5), *st.dequeueBuilderArg)
require.Equal(t, uint64(2), *st.dequeuePartialArg)
require.Equal(t, primitives.BuilderIndex(7), *st.setNextBuilderIndexArg)
require.Equal(t, true, st.nextValidatorIndexCalled)
expectedNext := (uint64(st.nextValidatorIndex) + uint64(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)) % st.numValidators
require.Equal(t, primitives.ValidatorIndex(expectedNext), *st.setNextValidatorIndexArg)
},
},
{
name: "full payload uses last validator index",
build: func(t *testing.T) *withdrawalsState {
max := int(params.BeaconConfig().MaxWithdrawalsPerPayload)
withdrawals := make([]*enginev1.Withdrawal, max)
for i := range max {
withdrawals[i] = &enginev1.Withdrawal{
Index: uint64(i),
ValidatorIndex: 0,
Amount: 1,
Address: []byte{0x03},
}
}
withdrawals[max-1].ValidatorIndex = 4
return &withdrawalsState{
BeaconState: newGloasState(t, nil, nil),
parentFull: true,
numValidators: 5,
expectedResult: state.ExpectedWithdrawalsGloasResult{
Withdrawals: withdrawals,
NextWithdrawalBuilderIndex: 1,
},
}
},
check: func(t *testing.T, st *withdrawalsState) {
max := int(params.BeaconConfig().MaxWithdrawalsPerPayload)
require.NotNil(t, st.setNextWithdrawalIndexArg)
require.Equal(t, uint64(max), *st.setNextWithdrawalIndexArg)
require.Equal(t, false, st.nextValidatorIndexCalled)
require.Equal(t, primitives.ValidatorIndex(0), *st.setNextValidatorIndexArg)
},
},
{
name: "empty withdrawals skips next index update",
build: func(t *testing.T) *withdrawalsState {
return &withdrawalsState{
BeaconState: newGloasState(t, nil, nil),
parentFull: true,
numValidators: 8,
expectedResult: state.ExpectedWithdrawalsGloasResult{
Withdrawals: []*enginev1.Withdrawal{},
ProcessedBuilderWithdrawalsCount: 1,
ProcessedPartialWithdrawalsCount: 2,
NextWithdrawalBuilderIndex: 4,
},
}
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, false, st.setNextWithdrawalIndexCalled)
require.Equal(t, true, st.setPayloadExpectedWithdrawalsCalled)
require.Equal(t, true, st.dequeueBuilderCalled)
require.Equal(t, true, st.dequeuePartialCalled)
require.Equal(t, true, st.setNextBuilderIndexCalled)
require.Equal(t, true, st.nextValidatorIndexCalled)
require.Equal(t, true, st.setNextValidatorIndexCalled)
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
st := tc.build(t)
require.NoError(t, ProcessWithdrawals(st))
if tc.check != nil {
tc.check(t, st)
}
})
}
}
func TestProcessWithdrawals_ErrorPaths(t *testing.T) {
base := func(t *testing.T) *withdrawalsState {
return &withdrawalsState{
BeaconState: newGloasState(t, nil, nil),
parentFull: true,
numValidators: 16,
expectedResult: state.ExpectedWithdrawalsGloasResult{
Withdrawals: []*enginev1.Withdrawal{
{Index: 1, ValidatorIndex: 2, Amount: 1, Address: []byte{0x01}},
},
ProcessedBuilderWithdrawalsCount: 1,
ProcessedPartialWithdrawalsCount: 1,
NextWithdrawalBuilderIndex: 2,
},
nextValidatorIndex: 5,
}
}
cases := []struct {
name string
err error
set func(st *withdrawalsState, err error)
check func(t *testing.T, st *withdrawalsState)
}{
{
name: "parent block full error",
err: errors.New("parent err"),
set: func(st *withdrawalsState, err error) {
st.parentErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, false, st.expectedCalled)
},
},
{
name: "expected withdrawals error",
err: errors.New("expected err"),
set: func(st *withdrawalsState, err error) {
st.expectedErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.expectedCalled)
require.Equal(t, false, st.decreaseCalled)
},
},
{
name: "decrease balances error",
err: errors.New("decrease err"),
set: func(st *withdrawalsState, err error) {
st.decreaseErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.decreaseCalled)
require.Equal(t, false, st.setNextWithdrawalIndexCalled)
},
},
{
name: "set next withdrawal index error",
err: errors.New("next index err"),
set: func(st *withdrawalsState, err error) {
st.setNextWithdrawalIndexErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.setNextWithdrawalIndexCalled)
require.Equal(t, false, st.setPayloadExpectedWithdrawalsCalled)
},
},
{
name: "set payload expected withdrawals error",
err: errors.New("payload expected err"),
set: func(st *withdrawalsState, err error) {
st.setPayloadExpectedWithdrawalsErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.setPayloadExpectedWithdrawalsCalled)
require.Equal(t, false, st.dequeueBuilderCalled)
},
},
{
name: "dequeue builder pending withdrawals error",
err: errors.New("dequeue builder err"),
set: func(st *withdrawalsState, err error) {
st.dequeueBuilderErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.dequeueBuilderCalled)
require.Equal(t, false, st.dequeuePartialCalled)
},
},
{
name: "dequeue pending partial withdrawals error",
err: errors.New("dequeue partial err"),
set: func(st *withdrawalsState, err error) {
st.dequeuePartialErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.dequeuePartialCalled)
require.Equal(t, false, st.setNextBuilderIndexCalled)
},
},
{
name: "set next withdrawal builder index error",
err: errors.New("next builder err"),
set: func(st *withdrawalsState, err error) {
st.setNextBuilderIndexErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.setNextBuilderIndexCalled)
require.Equal(t, false, st.nextValidatorIndexCalled)
},
},
{
name: "next withdrawal validator index error",
err: errors.New("next validator err"),
set: func(st *withdrawalsState, err error) {
st.nextValidatorIndexErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.nextValidatorIndexCalled)
require.Equal(t, false, st.setNextValidatorIndexCalled)
},
},
{
name: "set next withdrawal validator index error",
err: errors.New("set next validator err"),
set: func(st *withdrawalsState, err error) {
st.setNextValidatorIndexErr = err
},
check: func(t *testing.T, st *withdrawalsState) {
require.Equal(t, true, st.setNextValidatorIndexCalled)
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
st := base(t)
tc.set(st, tc.err)
err := ProcessWithdrawals(st)
require.ErrorIs(t, err, tc.err)
if tc.check != nil {
tc.check(t, st)
}
})
}
}
type withdrawalsState struct {
setNextValidatorIndexCalled bool
nextValidatorIndexCalled bool
setNextBuilderIndexCalled bool
dequeuePartialCalled bool
dequeueBuilderCalled bool
setPayloadExpectedWithdrawalsCalled bool
setNextWithdrawalIndexCalled bool
parentFull bool
expectedCalled bool
decreaseCalled bool
numValidators uint64
setNextWithdrawalIndexArg *uint64
nextValidatorIndex primitives.ValidatorIndex
setNextBuilderIndexArg *primitives.BuilderIndex
dequeuePartialArg *uint64
setNextValidatorIndexArg *primitives.ValidatorIndex
dequeueBuilderArg *uint64
state.BeaconState
setNextValidatorIndexErr error
setNextBuilderIndexErr error
dequeuePartialErr error
dequeueBuilderErr error
setPayloadExpectedWithdrawalsErr error
nextValidatorIndexErr error
decreaseErr error
expectedErr error
parentErr error
setNextWithdrawalIndexErr error
setPayloadExpectedWithdrawalsArg []*enginev1.Withdrawal
expectedResult state.ExpectedWithdrawalsGloasResult
}
func (w *withdrawalsState) IsParentBlockFull() (bool, error) {
return w.parentFull, w.parentErr
}
func (w *withdrawalsState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
w.expectedCalled = true
if w.expectedErr != nil {
return state.ExpectedWithdrawalsGloasResult{}, w.expectedErr
}
return w.expectedResult, nil
}
func (w *withdrawalsState) DecreaseWithdrawalBalances(_ []*enginev1.Withdrawal) error {
w.decreaseCalled = true
return w.decreaseErr
}
func (w *withdrawalsState) SetNextWithdrawalIndex(index uint64) error {
w.setNextWithdrawalIndexCalled = true
w.setNextWithdrawalIndexArg = &index
return w.setNextWithdrawalIndexErr
}
func (w *withdrawalsState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
w.setPayloadExpectedWithdrawalsCalled = true
w.setPayloadExpectedWithdrawalsArg = withdrawals
return w.setPayloadExpectedWithdrawalsErr
}
func (w *withdrawalsState) DequeueBuilderPendingWithdrawals(n uint64) error {
w.dequeueBuilderCalled = true
w.dequeueBuilderArg = &n
return w.dequeueBuilderErr
}
func (w *withdrawalsState) DequeuePendingPartialWithdrawals(n uint64) error {
w.dequeuePartialCalled = true
w.dequeuePartialArg = &n
return w.dequeuePartialErr
}
func (w *withdrawalsState) SetNextWithdrawalBuilderIndex(index primitives.BuilderIndex) error {
w.setNextBuilderIndexCalled = true
w.setNextBuilderIndexArg = &index
return w.setNextBuilderIndexErr
}
func (w *withdrawalsState) NextWithdrawalValidatorIndex() (primitives.ValidatorIndex, error) {
w.nextValidatorIndexCalled = true
if w.nextValidatorIndexErr != nil {
return 0, w.nextValidatorIndexErr
}
return w.nextValidatorIndex, nil
}
func (w *withdrawalsState) NumValidators() int {
return int(w.numValidators)
}
func (w *withdrawalsState) SetNextWithdrawalValidatorIndex(index primitives.ValidatorIndex) error {
w.setNextValidatorIndexCalled = true
w.setNextValidatorIndexArg = &index
return w.setNextValidatorIndexErr
}

View File

@@ -6,6 +6,7 @@ go_library(
"attestation.go",
"beacon_committee.go",
"block.go",
"builder.go",
"deposit.go",
"genesis.go",
"legacy.go",

View File

@@ -0,0 +1,12 @@
package helpers
import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
)
// IsBuilderWithdrawalCredential returns true if the withdrawal credentials indicate a builder.
func IsBuilderWithdrawalCredential(withdrawalCredentials []byte) bool {
return len(withdrawalCredentials) == fieldparams.RootLength &&
withdrawalCredentials[0] == params.BeaconConfig().BuilderWithdrawalPrefixByte
}

View File

@@ -33,6 +33,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -1,6 +1,9 @@
package peerdas
import (
stderrors "errors"
"iter"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -16,6 +19,7 @@ var (
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
ErrNoKzgCommitments = errors.New("no KZG commitments found")
ErrMismatchLength = errors.New("mismatch in the length of the column, commitments or proofs")
ErrEmptySegment = errors.New("empty segment in batch")
ErrInvalidKZGProof = errors.New("invalid KZG proof")
ErrBadRootLength = errors.New("bad root length")
ErrInvalidInclusionProof = errors.New("invalid inclusion proof")
@@ -57,65 +61,104 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
return nil
}
// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
// CellProofBundleSegment is returned when a batch fails. The caller can call
// the `.Verify` method to verify just this segment.
type CellProofBundleSegment struct {
indices []uint64
commitments []kzg.Bytes48
cells []kzg.Cell
proofs []kzg.Bytes48
}
// Verify verifies this segment without batching.
func (s CellProofBundleSegment) Verify() error {
if len(s.cells) == 0 {
return ErrEmptySegment
}
verified, err := kzg.VerifyCellKZGProofBatch(s.commitments, s.indices, s.cells, s.proofs)
if err != nil {
return stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
}
return nil
}
func VerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIter iter.Seq[blocks.CellProofBundle]) error {
// Ignore the failed segment list since we pass in only one segment.
_, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, []iter.Seq[blocks.CellProofBundle]{cellProofsIter})
return err
}
// BatchVerifyDataColumnsCellsKZGProofs verifies if the KZG proofs are correct.
// Note: We are slightly deviating from the specification here:
// The specification verifies the KZG proofs for each sidecar separately,
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
// This is done to improve performance since the internal KZG library is way more
// efficient when verifying in batch.
// efficient when verifying in batch. If the batch fails, the failed segments
// are returned to the caller so that they may try segment by segment without
// batching. On success the failed segment list is empty.
//
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
// Compute the total count.
count := 0
for _, sidecar := range sidecars {
count += len(sidecar.Column)
}
func BatchVerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) ( /* failed segment list */ []CellProofBundleSegment, error) {
commitments := make([]kzg.Bytes48, 0, sizeHint)
indices := make([]uint64, 0, sizeHint)
cells := make([]kzg.Cell, 0, sizeHint)
proofs := make([]kzg.Bytes48, 0, sizeHint)
commitments := make([]kzg.Bytes48, 0, count)
indices := make([]uint64, 0, count)
cells := make([]kzg.Cell, 0, count)
proofs := make([]kzg.Bytes48, 0, count)
for _, sidecar := range sidecars {
for i := range sidecar.Column {
var anySegmentEmpty bool
var segments []CellProofBundleSegment
for _, cellProofsIter := range cellProofsIters {
startIdx := len(cells)
for bundle := range cellProofsIter {
var (
commitment kzg.Bytes48
cell kzg.Cell
proof kzg.Bytes48
)
commitmentBytes := sidecar.KzgCommitments[i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]
if len(commitmentBytes) != len(commitment) ||
len(cellBytes) != len(cell) ||
len(proofBytes) != len(proof) {
return ErrMismatchLength
if len(bundle.Commitment) != len(commitment) ||
len(bundle.Cell) != len(cell) ||
len(bundle.Proof) != len(proof) {
return nil, ErrMismatchLength
}
copy(commitment[:], commitmentBytes)
copy(cell[:], cellBytes)
copy(proof[:], proofBytes)
copy(commitment[:], bundle.Commitment)
copy(cell[:], bundle.Cell)
copy(proof[:], bundle.Proof)
commitments = append(commitments, commitment)
indices = append(indices, sidecar.Index)
indices = append(indices, bundle.ColumnIndex)
cells = append(cells, cell)
proofs = append(proofs, proof)
}
if len(cells[startIdx:]) == 0 {
anySegmentEmpty = true
}
segments = append(segments, CellProofBundleSegment{
indices: indices[startIdx:],
commitments: commitments[startIdx:],
cells: cells[startIdx:],
proofs: proofs[startIdx:],
})
}
if anySegmentEmpty {
return segments, ErrEmptySegment
}
// Batch verify that the cells match the corresponding commitments and proofs.
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
if err != nil {
return errors.Wrap(err, "verify cell KZG proof batch")
return segments, stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
return segments, ErrInvalidKZGProof
}
return nil
return nil, nil
}
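// Illustrative sketch (hypothetical caller, not part of this diff): batch
// verify several bundle groups and, if the batch fails, retry each returned
// segment individually to isolate the offending group.
func verifySegmentsWithFallback(sizeHint int, iters []iter.Seq[blocks.CellProofBundle]) error {
	segments, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, iters)
	if err == nil {
		return nil
	}
	// The batch failed: verify each segment on its own to find the bad one.
	for i, segment := range segments {
		if segErr := segment.Verify(); segErr != nil {
			return errors.Wrapf(segErr, "segment %d failed KZG verification", i)
		}
	}
	// Every segment verified individually; surface the original batch error.
	return err
}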
// VerifyDataColumnSidecarInclusionProof verifies that the given KZG commitments are included in the given beacon block.

View File

@@ -3,6 +3,7 @@ package peerdas_test
import (
"crypto/rand"
"fmt"
"iter"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
@@ -72,7 +73,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
})
@@ -80,14 +81,15 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0][0]++ // It is OK to overflow
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
})
t.Run("nominal", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
failedSegments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(blobCount, []iter.Seq[blocks.CellProofBundle]{blocks.RODataColumnsToCellProofBundles(sidecars)})
require.NoError(t, err)
require.Equal(t, 0, len(failedSegments))
})
}
@@ -273,7 +275,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
for _, sidecar := range sidecars {
sidecars := []blocks.RODataColumn{sidecar}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -308,7 +310,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(allSidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -341,7 +343,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
for _, sidecars := range allSidecars {
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(len(allSidecars), blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}

View File

@@ -5,6 +5,7 @@ import (
"sync"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -338,8 +339,16 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
return cellsPerBlob, proofsPerBlob, nil
}
// StructuredCellsAndProofs packages the results of computing cells and proofs from structured blobs.
type StructuredCellsAndProofs struct {
Included bitfield.Bitlist
CellsPerBlob [][]kzg.Cell
ProofsPerBlob [][]kzg.Proof
}
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
// commitmentCount is required to return a correctly sized bitlist even if blobsAndProofs is nil.
func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2) (StructuredCellsAndProofs, error) {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -347,14 +356,24 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
var wg errgroup.Group
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
var blobsPresent int
for _, blobAndProof := range blobsAndProofs {
if blobAndProof != nil {
blobsPresent++
}
}
cellsPerBlob := make([][]kzg.Cell, blobsPresent)
proofsPerBlob := make([][]kzg.Proof, blobsPresent)
included := bitfield.NewBitlist(commitmentCount)
var j int
for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
continue
}
included.SetBitAt(uint64(i), true)
compactIndex := j
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
@@ -381,17 +400,22 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
kzgProofs = append(kzgProofs, kzgProof)
}
cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
cellsPerBlob[compactIndex] = cells
proofsPerBlob[compactIndex] = kzgProofs
return nil
})
j++
}
if err := wg.Wait(); err != nil {
return nil, nil, err
return StructuredCellsAndProofs{}, err
}
return cellsPerBlob, proofsPerBlob, nil
return StructuredCellsAndProofs{
Included: included,
CellsPerBlob: cellsPerBlob,
ProofsPerBlob: proofsPerBlob,
}, nil
}
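// Illustrative sketch (hypothetical helper, not part of this diff): Included
// is indexed by commitment position, while CellsPerBlob/ProofsPerBlob are
// compacted to only the blobs that were actually present. This recovers the
// compact index for a commitment index, or -1 if the blob was missing.
func compactIndexFor(result StructuredCellsAndProofs, commitmentIndex uint64) int {
	if !result.Included.BitAt(commitmentIndex) {
		return -1
	}
	compact := 0
	for i := uint64(0); i < commitmentIndex; i++ {
		if result.Included.BitAt(i) {
			compact++
		}
	}
	return compact
}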
// ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.

View File

@@ -479,8 +479,9 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
t.Run("nil blob and proof", func(t *testing.T) {
_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
result, err := peerdas.ComputeCellsAndProofsFromStructured(0, []*pb.BlobAndProofV2{nil})
require.NoError(t, err)
require.Equal(t, uint64(0), result.Included.Count())
})
t.Run("nominal", func(t *testing.T) {
@@ -533,24 +534,25 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
require.NoError(t, err)
// Test ComputeCellsAndProofs
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
result, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(blobsAndProofs)), blobsAndProofs)
require.Equal(t, result.Included.Count(), uint64(len(result.CellsPerBlob)))
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsPerBlob))
require.Equal(t, blobCount, len(result.CellsPerBlob))
// Verify the results match expected
for i := range blobCount {
require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), cap(actualProofsPerBlob[i]))
require.Equal(t, len(expectedCellsPerBlob[i]), len(result.CellsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), len(result.ProofsPerBlob[i]))
require.Equal(t, len(expectedProofsPerBlob[i]), cap(result.ProofsPerBlob[i]))
// Compare cells
for j, expectedCell := range expectedCellsPerBlob[i] {
require.Equal(t, expectedCell, actualCellsPerBlob[i][j])
require.Equal(t, expectedCell, result.CellsPerBlob[i][j])
}
// Compare proofs
for j, expectedProof := range expectedProofsPerBlob[i] {
require.Equal(t, expectedProof, actualProofsPerBlob[i][j])
require.Equal(t, expectedProof, result.ProofsPerBlob[i][j])
}
}
})

View File

@@ -3,6 +3,7 @@ package peerdas
import (
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
beaconState "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -23,11 +24,13 @@ var (
var (
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
_ ConstructionPopulator = (*PartialDataColumnHeaderReconstructionSource)(nil)
)
const (
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
BlockType = "BeaconBlock"
SidecarType = "DataColumnSidecar"
PartialDataColumnHeaderType = "PartialDataColumnHeader"
)
type (
@@ -54,6 +57,11 @@ type (
blocks.VerifiedRODataColumn
}
PartialDataColumnHeaderReconstructionSource struct {
*ethpb.PartialDataColumnHeader
root [fieldparams.RootLength]byte
}
blockInfo struct {
signedBlockHeader *ethpb.SignedBeaconBlockHeader
kzgCommitments [][]byte
@@ -71,25 +79,37 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
}
// PopulateFromPartialHeader creates a PartialDataColumnHeaderReconstructionSource from a partial header.
// It validates that the signed block header is present and eagerly computes its hash tree root.
func PopulateFromPartialHeader(header *ethpb.PartialDataColumnHeader) (*PartialDataColumnHeaderReconstructionSource, error) {
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
return nil, errors.New("nil signed block header or header")
}
root, err := header.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "hash tree root")
}
return &PartialDataColumnHeaderReconstructionSource{PartialDataColumnHeader: header, root: root}, nil
}
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
totalNodeBalance := uint64(0)
func ValidatorsCustodyRequirement(st beaconState.ReadOnlyBalances, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
cfg := params.BeaconConfig()
idxs := make([]primitives.ValidatorIndex, 0, len(validatorsIndex))
for index := range validatorsIndex {
validator, err := state.ValidatorAtIndexReadOnly(index)
if err != nil {
return 0, errors.Wrapf(err, "validator at index %v", index)
}
totalNodeBalance += validator.EffectiveBalance()
idxs = append(idxs, index)
}
totalBalance, err := st.EffectiveBalanceSum(idxs)
if err != nil {
return 0, errors.Wrap(err, "effective balances")
}
cfg := params.BeaconConfig()
numberOfCustodyGroups := cfg.NumberOfCustodyGroups
validatorCustodyRequirement := cfg.ValidatorCustodyRequirement
balancePerAdditionalCustodyGroup := cfg.BalancePerAdditionalCustodyGroup
count := totalNodeBalance / balancePerAdditionalCustodyGroup
count := totalBalance / balancePerAdditionalCustodyGroup
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
}
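// Worked example (illustrative, assuming the current spec defaults of
// VALIDATOR_CUSTODY_REQUIREMENT = 8, BALANCE_PER_ADDITIONAL_CUSTODY_GROUP =
// 32 ETH and NUMBER_OF_CUSTODY_GROUPS = 128): a node whose attached
// validators hold 1024 ETH of effective balance gets 1024 / 32 = 32 custody
// groups; 4 ETH floors to 0 and is clamped up to 8, while 8192 ETH yields
// 256 and is clamped down to 128.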
@@ -143,6 +163,41 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
return roSidecars, nil
}
func PartialColumns(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator,
opts ...blocks.PartialDataColumnOption) ([]blocks.PartialDataColumn, error) {
start := time.Now()
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
dataColumns := make([]blocks.PartialDataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
dc, err := blocks.NewPartialDataColumn(src.Root(), info.signedBlockHeader, idx, info.kzgCommitments, info.kzgInclusionProof, opts...)
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
for i := range len(info.kzgCommitments) {
if !included.BitAt(uint64(i)) {
continue
}
dc.ExtendFromVerifiedCell(uint64(i), cells[idx][0], proofs[idx][0])
cells[idx] = cells[idx][1:]
proofs[idx] = proofs[idx][1:]
}
dataColumns = append(dataColumns, dc)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return dataColumns, nil
}
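// Illustrative sketch (hypothetical wiring, not part of this diff): feed the
// structured cells-and-proofs result straight into partial column
// construction. pb is assumed to alias proto/engine/v1 as elsewhere in this
// package.
func buildPartialColumns(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2, header *ethpb.PartialDataColumnHeader) ([]blocks.PartialDataColumn, error) {
	result, err := ComputeCellsAndProofsFromStructured(commitmentCount, blobsAndProofs)
	if err != nil {
		return nil, errors.Wrap(err, "compute cells and proofs")
	}
	src, err := PopulateFromPartialHeader(header)
	if err != nil {
		return nil, errors.Wrap(err, "populate from partial header")
	}
	// Included marks which commitments have cells; PartialColumns skips the rest.
	return PartialColumns(result.Included, result.CellsPerBlob, result.ProofsPerBlob, src)
}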
// Slot returns the slot of the source
func (s *BlockReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()
@@ -254,3 +309,39 @@ func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
return info, nil
}
// Slot returns the slot from the partial data column header
func (p *PartialDataColumnHeaderReconstructionSource) Slot() primitives.Slot {
return p.SignedBlockHeader.Header.Slot
}
// Root returns the block root computed from the header
func (p *PartialDataColumnHeaderReconstructionSource) Root() [fieldparams.RootLength]byte {
return p.root
}
// ProposerIndex returns the proposer index from the header
func (p *PartialDataColumnHeaderReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
return p.SignedBlockHeader.Header.ProposerIndex
}
// Commitments returns the KZG commitments from the header
func (p *PartialDataColumnHeaderReconstructionSource) Commitments() ([][]byte, error) {
return p.KzgCommitments, nil
}
// Type returns the type of the source
func (p *PartialDataColumnHeaderReconstructionSource) Type() string {
return PartialDataColumnHeaderType
}
// extract extracts the block information from the partial header
func (p *PartialDataColumnHeaderReconstructionSource) extract() (*blockInfo, error) {
info := &blockInfo{
signedBlockHeader: p.SignedBlockHeader,
kzgCommitments: p.KzgCommitments,
kzgInclusionProof: p.KzgCommitmentsInclusionProof,
}
return info, nil
}

View File

@@ -267,4 +267,32 @@ func TestReconstructionSource(t *testing.T) {
require.Equal(t, peerdas.SidecarType, src.Type())
})
t.Run("from partial header", func(t *testing.T) {
referenceSidecar := sidecars[0]
partialHeader := &ethpb.PartialDataColumnHeader{
SignedBlockHeader: referenceSidecar.SignedBlockHeader,
KzgCommitments: referenceSidecar.KzgCommitments,
KzgCommitmentsInclusionProof: referenceSidecar.KzgCommitmentsInclusionProof,
}
src, err := peerdas.PopulateFromPartialHeader(partialHeader)
require.NoError(t, err)
require.Equal(t, referenceSidecar.SignedBlockHeader.Header.Slot, src.Slot())
// Compute expected root
expectedRoot, err := referenceSidecar.SignedBlockHeader.Header.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, expectedRoot, src.Root())
require.Equal(t, referenceSidecar.SignedBlockHeader.Header.ProposerIndex, src.ProposerIndex())
commitments, err := src.Commitments()
require.NoError(t, err)
require.Equal(t, 2, len(commitments))
require.DeepEqual(t, commitment1, commitments[0])
require.DeepEqual(t, commitment2, commitments[1])
require.Equal(t, peerdas.PartialDataColumnHeaderType, src.Type())
})
}

View File

@@ -108,6 +108,15 @@ func CanUpgradeToFulu(slot primitives.Slot) bool {
return epochStart && fuluEpoch
}
// CanUpgradeToGloas returns true if the input `slot` can upgrade to Gloas.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == GLOAS_FORK_EPOCH
func CanUpgradeToGloas(slot primitives.Slot) bool {
epochStart := slots.IsEpochStart(slot)
gloasEpoch := slots.ToEpoch(slot) == params.BeaconConfig().GloasForkEpoch
return epochStart && gloasEpoch
}
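// Illustrative sketch (hypothetical caller outside this package, not part of
// this diff) of gating the fork transition at the epoch boundary, mirroring
// the existing Fulu pattern; coretime and gloas stand in for the usual
// import aliases:
//
//	func upgradeToGloasIfNeeded(st state.BeaconState) (state.BeaconState, error) {
//		if !coretime.CanUpgradeToGloas(st.Slot()) {
//			return st, nil
//		}
//		return gloas.UpgradeToGloas(st)
//	}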
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

Some files were not shown because too many files have changed in this diff.