Compare commits

...

141 Commits

Author SHA1 Message Date
terence tsao
00b32daaa4 Prysm rpc: Get local header 2024-09-28 09:46:11 -07:00
Potuz
c4550d75bd fix PayloadEnvelopeStateTransition test 2024-09-26 17:31:35 -03:00
terence tsao
1df3d0c7a3 Fix compute field roots with hasher 2024-09-26 12:50:45 -07:00
Potuz
f9bde3e097 export random execution request 2024-09-26 15:56:16 -03:00
Potuz
294a24616b fix build 2024-09-26 10:38:53 -03:00
terence
edd736fb9f Prysm rpc: submit signed execution payload header (#14441) 2024-09-25 16:46:13 -03:00
terence
13b469858d Update attestor and aggregator respect epbs intervals (#14454) 2024-09-25 16:46:13 -03:00
Potuz
70110ecb86 Provide a helper to compute the state root after a payload envelope (#14450) 2024-09-25 16:46:13 -03:00
Potuz
4cfafdf861 Fix pubkeyToIndex usage 2024-09-25 16:46:13 -03:00
terence
7f4091e334 Prysm rpc: Get payload attestation data (#14380) 2024-09-25 16:46:13 -03:00
Potuz
5addd5b3e9 Handle execution payload insertion in forkchoice (#14422) 2024-09-25 16:46:13 -03:00
Potuz
9abe898784 Add GetPTCVote helpers (#14420) 2024-09-25 16:46:13 -03:00
terence
391f6afab3 Add wait until PTC duty helper function (#14419)
Add wait until PTC duty
2024-09-25 16:46:13 -03:00
terence
1f62a72068 Prysm rpc: submit execution payload envelope (#14395) 2024-09-25 16:46:13 -03:00
Potuz
5d0549c2e0 Remove Changelog workflow 2024-09-25 16:46:13 -03:00
terence
fc794f48fb Prysm rpc: Submit payload attestation data (#14381) 2024-09-25 16:46:13 -03:00
Potuz
aaa60012e1 Receive ptc message (#14394)
* Handle incoming ptc attestation messages in the chain package

* fix double import

* remove unused error
2024-09-25 16:46:13 -03:00
Potuz
c21203ec78 Add ePBS fork schedule to config (#14383) 2024-09-25 16:46:13 -03:00
Potuz
a8a642a7b1 [ePBS] implement UpdateVotesOnPayloadAttestation (#14308) 2024-09-25 16:46:13 -03:00
Potuz
15250d7d9c Signed execution payload header for sync (#14363)
* Signed execution payload header for sync

* Use RO state

* SignedExecutionPayloadHeader by hash and root

* Fix execution headers cache
2024-09-25 16:46:13 -03:00
Potuz
8792b33b75 Refactor currentlySyncingPayload cache (#14350) 2024-09-25 16:46:13 -03:00
JihoonSong
4948035d7f Add unit tests of ExecutionPayloadEnvelope verification (#14373)
* Correct requirement list of EnvelopeVerifier

* Add unit tests of ExecutionPayloadEnvelope verification
2024-09-25 16:46:13 -03:00
terence
8cda3f0589 Enable validator client to sign execution payload envelope (#14346)
* Enable validator client to sign execution payload envelope

* Update comment

Co-authored-by: JihoonSong <jihoonsong@users.noreply.github.com>

---------

Co-authored-by: JihoonSong <jihoonsong@users.noreply.github.com>
2024-09-25 16:46:13 -03:00
Potuz
e7d840e843 Sync changes to process execution payload envelopes 2024-09-25 16:46:13 -03:00
Md Amaan
625308d7c5 Process withdrawal (#14297)
* process_withdrawal_fn and isParentfull test

* suggestions applied

* minor change

* removed

* lint

* lint fix

* removed Latestheader

* test added with nil error

* tests passing

* IsParentNode Test added

* lint

* fix test

* updated godoc

* fix in godoc

* comment removed

* fixed braces

* removed var

* removed var

* Update beacon-chain/core/blocks/withdrawals.go

* Update beacon-chain/core/blocks/withdrawals_test.go

* gazelle

* test added and removed previous changes in Testprocesswithdrawal

* added check for nil state

* decrease chromatic complexity

---------

Co-authored-by: Potuz <potuz@potuz.net>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2024-09-25 16:46:13 -03:00
Potuz
523422ed84 Enable validator client to sign execution header (#14333)
* Enable validator client to sign execution header

* Update proto/prysm/v1alpha1/validator-client/keymanager.proto

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2024-09-25 16:46:13 -03:00
terence
2efb3065be Initialize payload att message verifier in sync (#14323) 2024-09-25 16:46:13 -03:00
terence
dc538b9696 Add getter for payload attestation cache (#14328)
* Add getter for payload attestation cache

* Check against status

* Feedback #1
2024-09-25 16:46:13 -03:00
Potuz
ca32889e1a Payload Attestation Sync package changes (#13989)
* Payload Attestation Sync package changes

* With verifier

* change idx back to uint64

* subscribe to topic

* add back error

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2024-09-25 16:46:13 -03:00
Potuz
a1b6059051 Process Execution Payload Envelope in Chain Service (#14295)
Adds the processing of execution payload envelope
Corrects the protos for attestations and slashings in Electra versions
Adds generators of full blocks for Electra
2024-09-25 16:46:09 -03:00
Md Amaan
3a61df4a96 Indexed payload attestation test (#14299)
* test-added

* nil check fix

* randomized inputs

* hardcoded inputs

* suggestions applied

* minor-typo fixed

* deleted
2024-09-25 16:44:09 -03:00
JihoonSong
bdabbef300 Add execution_payload and payload_attestation_message topics (#14304)
* Add `execution_payload` and `payload_attestation_message` topics

* Set `SourcePubkey` to 48 bytes long

* Add randomly populated `PayloadAttestationMessage` object

* Add tests for `execution_payload` and `payload_attestation_message` topics
2024-09-25 16:44:09 -03:00
terence
c6362fa5a6 Broadcast signed execution payload header to peer (#14300) 2024-09-25 16:44:09 -03:00
terence
5688afc6a4 Read only payload attestation message with Verifier (#14222)
* Read only payload attestation message with verifier

* Payload attestation tests (#14242)

* Payload attestation in verification package

* Feedback #1

---------

Co-authored-by: Md Amaan <114795592+Redidacove@users.noreply.github.com>
2024-09-25 16:44:09 -03:00
Potuz
f0fe5bbab1 Allow nodes with and without payload in forkchoice (#14288)
* Allow nodes with and without payload in forkchoice

    This PR takes care of adding nodes to forkchoice that may or may not
    have a corresponding payload. The rationale is as follows

    - The node structure is kept almost the same as today.
    - A zero payload hash is considered as if the node was empty (except for
      the tree root)
    - When inserting a node we check what the right parent node would be
      depending on whether the parent had a payload or not.
    - For pre-epbs forks all nodes are full, no logic changes except a new
      steps to gather the parent hash that is needed for block insertion.

    This PR had to change some core consensus types and interfaces.
    - It removed the ROBlockEPBS interface and added the corresponding ePBS
      fields to the ReadOnlyBeaconBlockBody
    - It moved the setters and getters to epbs dedicated files.

    It also added a checker for `IsParentFull` on forkchoice that simply
    checks for the parent hash of the parent node.

* review
2024-09-25 16:43:55 -03:00
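The commit body above treats a node with a zero payload hash as empty and decides fullness of the parent by looking at the parent node's payload hash. A minimal illustrative sketch of that idea, using a simplified node struct with hypothetical names rather than Prysm's actual forkchoice types:

```go
package main

import "fmt"

// node is a simplified forkchoice node; a zero payload hash means the node
// was inserted without a corresponding execution payload ("empty").
// These names are illustrative, not Prysm's real types.
type node struct {
	root        [32]byte
	payloadHash [32]byte
	parent      *node
}

// isFull reports whether the node carries an execution payload.
func (n *node) isFull() bool {
	return n.payloadHash != [32]byte{}
}

// isParentFull mirrors the helper described above: it simply checks the
// payload hash of the parent node (a nil parent, e.g. the tree root, is
// treated as full here).
func isParentFull(n *node) bool {
	if n.parent == nil {
		return true
	}
	return n.parent.isFull()
}

func main() {
	full := &node{root: [32]byte{1}, payloadHash: [32]byte{0xaa}}
	empty := &node{root: [32]byte{2}, parent: full} // inserted without a payload
	child := &node{root: [32]byte{3}, parent: empty, payloadHash: [32]byte{0xbb}}
	fmt.Println(isParentFull(empty), isParentFull(child)) // true false
}
```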
Potuz
107c1f9e04 Use BeaconCommittees helper to get the ptc (#14286) 2024-09-25 16:42:00 -03:00
JihoonSong
42be4ee3f5 Add payload attestation helper functions (#14258)
* Add `IndexedPayloadAttestation` container

* Add `GetPayloadAttestingIndices` and its unit test

* Add `GetIndexedPayloadAttestation` and its unit test

* Add `is_valid_indexed_payload_attestation` and its unit test

* Create a smaller set of validators for faster unit test

* Pass context to `GetPayloadTimelinessCommittee`

* Iterate `ValidatorsReadOnly` instead of copying all validators
2024-09-25 16:42:00 -03:00
Potuz
dbd9141eca Use slot for latest message in forkchoice (#14279) 2024-09-25 16:42:00 -03:00
Potuz
68fdcf9776 Ensure epbs state getters & setters check versions (#14276)
* Ensure EPBS state getters and setters check versions

* Rename to LatestExecutionPayloadHeaderEPBS

* Add minimal beacon state
2024-09-25 16:41:55 -03:00
JihoonSong
872269c7cb Add remove_flag and its unit test (#14260)
* Add `remove_flag` and its unit test

* Add a test case trying to remove a flag that is not set
2024-09-25 16:41:14 -03:00
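For context, removing a participation flag just clears a single bit, and removing a flag that is not set is a no-op (the extra test case above). A minimal sketch of that behaviour, with hypothetical names rather than Prysm's actual helpers:

```go
package main

import "fmt"

// ParticipationFlags is a per-validator participation bitfield, as in the
// beacon chain spec; the type and function names here are illustrative.
type ParticipationFlags uint8

// hasFlag reports whether the flag at flagIndex is set.
func hasFlag(flags ParticipationFlags, flagIndex uint8) bool {
	return flags&(1<<flagIndex) != 0
}

// removeFlag clears the flag at flagIndex; removing a flag that is not set
// leaves the bitfield unchanged.
func removeFlag(flags ParticipationFlags, flagIndex uint8) ParticipationFlags {
	return flags &^ (1 << flagIndex)
}

func main() {
	f := ParticipationFlags(0b101)
	fmt.Println(removeFlag(f, 0)) // 0b100 = 4
	fmt.Println(removeFlag(f, 1)) // unchanged: 0b101 = 5
	fmt.Println(hasFlag(f, 1))    // false
}
```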
JihoonSong
e60c5322ef Modify get_ptc function to follow the Python spec (#14256)
* Modify `get_ptc` function to follow the Python spec

* Assign PTC members from the beginning of beacon committee array
2024-09-25 16:41:14 -03:00
Potuz
e62ee87911 Remove inclusion list from epbs (#14188) 2024-09-25 16:41:09 -03:00
Potuz
19a7c0b8c3 Enable validator client to submit payload attestation message (#14064) 2024-09-25 16:38:37 -03:00
Potuz
1eba10c0e2 Add PTC assignment support for Duty endpoint (#14032) 2024-09-25 16:38:37 -03:00
Potuz
fa559e30dd use Keymanager() in validator client 2024-09-25 16:38:29 -03:00
Potuz
b42cc45e44 Change gwei math to primitives package for ePBS state 2024-09-25 16:37:46 -03:00
terence
ac127c95c8 Fix GetPayloadTimelinessCommittee to return correct PTC size (#14012) 2024-09-25 16:31:53 -03:00
Potuz
c30f94d4be Add ePBS to db (#13971)
* Add ePBS to db
2024-09-25 16:30:26 -03:00
Potuz
099efc1f3c Add EPBS slashing params 2024-09-25 16:23:55 -03:00
Potuz
33e33978e3 Implement get_ptc
This implements a helper to get the ptc committee from a state. It uses
the cached beacon committees if possible

It also implements a helper to compute the largest power of two of a
uint64 and a helper to test for nil payload attestation messages
2024-09-25 16:23:55 -03:00
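The "largest power of two of a uint64" helper mentioned above can be computed from the position of the highest set bit. A minimal sketch under that assumption (the function name here is hypothetical, not Prysm's actual helper):

```go
package main

import (
	"fmt"
	"math/bits"
)

// largestPowerOfTwo returns the greatest power of two <= n, or 0 when n == 0.
func largestPowerOfTwo(n uint64) uint64 {
	if n == 0 {
		return 0
	}
	// bits.Len64(n) is one more than the index of the highest set bit,
	// so shifting 1 left by (len - 1) isolates that bit.
	return 1 << (bits.Len64(n) - 1)
}

func main() {
	for _, n := range []uint64{1, 3, 512, 513, 1000} {
		fmt.Printf("largestPowerOfTwo(%d) = %d\n", n, largestPowerOfTwo(n))
	}
}
```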
Potuz
ff952dae4e Add ePBS to state (#13926) 2024-09-25 16:23:49 -03:00
Potuz
440b173feb Add testing utility methods to return randomly populated ePBS objects 2024-09-25 16:01:15 -03:00
Potuz
f126d64c96 Add ePBS stuff to consensus-types: block 2024-09-25 16:01:09 -03:00
Potuz
8f170f97ec Helper for Payload Attestation Signing (#13901) 2024-09-25 15:55:25 -03:00
Potuz
da3159dd6a ePBS configuration constants 2024-09-25 15:54:05 -03:00
Potuz
31ae744237 Add ePBS beacon state proto 2024-09-25 15:54:01 -03:00
Potuz
3b2d29dce4 Add protos for ePBS except state 2024-09-25 15:51:53 -03:00
Potuz
ddafedc268 Implement consensus-specs/3875 (#14458)
* WIP

- beacon-chain builds

* pass blockchain tests

* pass beacon-chain/execution tests

* Passing RPC tests

* fix building

* add changelog

* fix linters

* Spectests

* copy requests on Copy()

* Fix tests

* Fix config test

* fix verification tests

* add aliases for Electra types

* double import and unskip spectests

* Remove unnecessary comment
2024-09-25 17:06:52 +00:00
Nishant Das
7e5738bfcd Update Pubsub Library To Use Gossipsub 1.2 (#14428)
* Update Libp2p Packages

* Update CHANGELOG

* Finally Fix All Tests

* Fix Build

* Fix Build

* Fix TestP2P Connection Initialization

* Fix TestP2P Host Options

* Fix Test By Removing WaitGroup
2024-09-24 16:15:19 +00:00
Preston Van Loon
315c05b351 async/event: Use geth's implementation of event/feed.go (#14362)
* Use geth's event

* run gazelle
2024-09-23 19:57:04 +00:00
Jun Song
3662cf6009 Add Electra case for GetBeaconStateV2 (#14466)
* Add Electra case for GetBeaconStateV2

* Add CHANGELOG.md entry

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-09-20 21:20:13 +00:00
james-prysm
98d8b50b0e deflake pushsettings test - custom pattern matcher (#14453)
* adding custom matcher

* changelog

* removing unneeded map

* fixing merge conflict
2024-09-20 21:15:47 +00:00
Jun Song
1a1cc25bd1 Deprecate pb.gw.go files (#14464)
* Clean pb.gw.go files

* Deprecate pb.gw.go file support
2024-09-20 17:18:17 +00:00
james-prysm
bc9c7193a9 FIX: removing extra container for ssz blob sidecar response (#14451)
* removing extra container for ssz blob sidecar response

* gaz and changelog

* gaz

* fixing dependencies

* fixing test

* updating values based on feedback"
2024-09-16 14:28:47 +00:00
Radosław Kapka
7ac3c01b5b Refactor light client functions (#14434)
* Use correct types in light client functions

* conversions

* more refactoring

* test fixes

* changelog

* error fix

* revert test changes

* revert test skip

* Update api/server/structs/conversions_lightclient.go

Co-authored-by: Rupam Dey <117000803+rupam-04@users.noreply.github.com>

* use BlockToLightClientHeader

* reviewer suggestion

* Revert "use BlockToLightClientHeader"

This reverts commit f3df56ded5.

---------

Co-authored-by: Rupam Dey <117000803+rupam-04@users.noreply.github.com>
2024-09-13 22:08:28 +00:00
Bastin
ed6f69e868 Upgrade LightClient DB (#14432)
* add kv tests for capella and deneb

* test execution fields

* Update proto/eth/v2/custom.go

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>

* Update proto/eth/v2/custom.go

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>

* Update proto/eth/v2/custom.go

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>

* refactor tests using deepEqual

---------

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-09-13 03:53:02 +00:00
Preston Van Loon
222b360c66 Electra: Remove signing domain for consolidations (#14437)
* Electra: Remove signing domain for consolidations. See https://github.com/ethereum/consensus-specs/pull/3915

* Update changelog
2024-09-13 02:00:41 +00:00
Sammy Rosso
170a864239 Otel migration (#14424)
* remove opencensus

* gaz

* update dependencies

* add missing dependencies

* fix test?

* Fix note relevance

* add otel http transport middleware

* gaz

* tidy up

* gaz

* changelog

* feedback

* gaz

* fix merge issues
2024-09-12 23:00:20 +00:00
Rupam Dey
b5cfd0d35d feat: implement block_to_light_client_header (#14433)
* implement `block_to_light_client_header` upto Deneb

* update `CHANGELOG.md`

* refactor: remove unnecessary variables

* move functions to `core/light-client/lightclient.go`

* feat: add tests

* refactors

* add blinded beacon block support to tests

* revert "add blinded beacon block support to tests"

* add tests for blinded beacon block

* lint

* refactor: move common code outside of if-else

* fix dependencies
2024-09-12 17:53:51 +00:00
hopinheimer
28181710b0 version bump on k8s io client-go and apimachinery (#14444)
* version bump on k8s io client-go and apimachinery

* bazel file served

* fixing build issues

* some changes in noops functions

* Update CHANGELOG.md

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2024-09-12 14:59:43 +00:00
terence
2f756b7ec4 Refactor logging proposed block (#14443) 2024-09-12 14:08:10 +00:00
terence
df4ca54a76 Remove unused blobs bundle cache and usages (#14438) 2024-09-12 01:28:22 +00:00
james-prysm
875e3e5e7d E2E: fixing builder route registration after gorilla mux removal (#14445)
* fixing builder route registration afte gorilla mux removal

* changelog
2024-09-11 23:15:08 +00:00
Md Amaan
a5317f8117 Replaced mux with http.ServeMux (#14416)
* Replaced mux with http.ServeMux

* updated change log

* james suggestions

* lint

* lint fix 2

* passed middlewares from validatorclient

* gazelle fix

* fixed issue

* added middlewares field to rpc config

* suggestions applied

* updated godoc

* fixed TestCors

* refactor

* godoc added

* cli code removed and lint fixed

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-09-11 19:39:05 +00:00
Potuz
38b92c0171 Avoid allocating in loops (#14439)
* Avoid allocating in loops

* change process slashings

* add changelog

* Kasey's review

* Only marked trie dirty if changed
2024-09-11 17:58:07 +00:00
Potuz
a03b34af77 Use read only validators on ApplyToEveryValidator (#14426)
* Use read only validators on ApplyToEveryValidator

* Use ReadFromEveryValidator on slashing

* change changelog

* Revert "Use ReadFromEveryValidator on slashing"

This reverts commit 74c055bddb56e0573075c71df8a40f1c6a9bfdfd.
2024-09-10 11:34:42 +00:00
james-prysm
4c14bd8be2 looking at ways to reduce validator registration calls (#14371)
* looking at ways to reduce validator registration calls

* small mistake, should be epoch start

* adding more optimizations for reducing registration calls while covering more edgecases

* linting

* adding change log and force full push override

* fixing bug and adding tests

* changing if statement just to be safe

* potuz feedback for easier readability

* more review feedback for simplicity

* more review suggestions from potuz

* fix unit test

* reduce redundancy

* Update CHANGELOG.md

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* small nitpick

* fixing typo

* updating logs

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-09-09 16:30:41 +00:00
Radosław Kapka
62b8e63a0a Compare with nil before invoking IsNil() on an interface (#14431)
* Compare with nil before invoking `IsNil()` on an interface

* changelog

* review
2024-09-06 19:39:33 +00:00
Rupam Dey
eec3b0b7fe feat: introduce Capella and Deneb full-node.md lc changes (#14376)
* feat: introduce Capella and Deneb `full-node.md` lc changes

* add switch-case and replace `[][]byte` with `[][]string`

* return version name in http header

* populate header and use `interfaces.ReadOnlyBeaconBlock`

* fix lint

* merge cases in switch case and replace `interfaces.ExecutionData` with `*ExecutionPayloadHeader`

* minor fixes

* refactor `createLightClientBootstrapCapella` and `createLightClientBootstrapDeneb`

* use lightclientheader instead of different versions

* fix failing `TestLightClientHandler_GetLightClientBootstrap` tests

* fix lint

* refactor handlers

* refactor handlers more

* refactor handlers even more

* create conversions_lightclient

* fix lint errors

* add deneb and capella proto headers

* update lightclientbootstrap proto struct to capella&deneb

* update usecases

* update usecases

* resolve panic in header.GetBeacon

* fix spacings

* refactor core/lightclient.go

* fix isBetterUpdate

* use errors.wrap instead of fmt.errorf

* changelog entry

* fix lint errors

* fix api structs to use json rawMessage

* inline unmarshal

* remove redundant nil check

* revert remove redundant nil check

* return error in newLightClientUpdateToJSON

* inline getExecutionData

* better error handling

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Inspector-Butters <mohamadbastin@gmail.com>
Co-authored-by: Bastin <43618253+Inspector-Butters@users.noreply.github.com>
2024-09-06 17:06:31 +00:00
james-prysm
2bffb83a00 gateway flag changes breaking release e2e (#14418)
* e2e release test breaks due to changes in flag naming

* changing approach to fix

* adding some small alias test

* fixing test, changelog, and flag name

* Update config_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-09-05 21:19:56 +00:00
james-prysm
45fd3eb1bf gRPC Gateway Removal (#14089)
* wip passing e2e

* reverting temp comment

* remove unneeded comments

* fixing merge errors

* fixing more bugs from merge

* fixing test

* WIP moving code around and fixing tests

* unused linting

* gaz

* temp removing these tests as we need placeholder/wrapper APIs for them with the removal of the gateway

* attempting to remove dependencies to gRPC gateway , 1 mroe left in deps.bzl

* renaming flags and other gateway services to http

* goimport

* fixing deepsource

* git mv

* Update validator/package/validator.yaml

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/package/validator.yaml

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing feedback

* missed lint

* renaming import

* reversal based on feedback

* fixing web ui registration

* don't require mux handler

* gaz

* removing gRPC service from validator completely, merged with http service, renames are a work in progress

* updating go.sum

* linting

* trailing white space

* realized there was more cleanup i could do with code reuse

* adding wrapper for routes

* reverting version

* fixing dependencies from merging develop

* gaz

* fixing unit test

* fixing dependencies

* reverting unit test

* fixing conflict

* updating change log

* Update log.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* gaz

* Update api/server/httprest/server.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* addressing some feedback

* forgot to remove deprecated flag in usage

* gofmt

* fixing test

* fixing deepsource issue

* moving deprecated flag and adding timeout handler

* missed removal of a flag

* fixing test:

* Update CHANGELOG.md

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing feedback

* updating comments based on feedback

* removing unused field for now, we can add it back in if we need to use the option

* removing unused struct

* changing api-timeout flag based on feedback

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-09-04 15:40:31 +00:00
terence
963a1b4cb7 Fix electra balance update (#14410) 2024-09-03 14:41:40 +00:00
Rupam Dey
77c845043d fix: make some places use sync committee period instead of epoch (#14406)
* fix: make some places use sync committee period instead of epoch

* add fix details to `CHANGELOG.md`

* update `CHANGELOG.md` comment
2024-09-02 16:44:08 +00:00
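For reference, a sync committee period is the epoch divided by EPOCHS_PER_SYNC_COMMITTEE_PERIOD (256 on mainnet), so comparing periods rather than raw epochs is what the fix above is about. A minimal sketch with the mainnet constant hard-coded for illustration:

```go
package main

import "fmt"

// epochsPerSyncCommitteePeriod is the mainnet EPOCHS_PER_SYNC_COMMITTEE_PERIOD.
const epochsPerSyncCommitteePeriod = 256

// syncCommitteePeriod maps an epoch to its sync committee period; light-client
// code should compare periods, not raw epochs, when deciding whether two
// updates belong to the same sync committee.
func syncCommitteePeriod(epoch uint64) uint64 {
	return epoch / epochsPerSyncCommitteePeriod
}

func main() {
	// Epochs 0..255 share period 0, epoch 256 starts period 1.
	fmt.Println(syncCommitteePeriod(255), syncCommitteePeriod(256)) // 0 1
}
```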
Brandon Liu
342bb0fcef Add value for MaxBuilderEpochMissedSlots (#14334)
* add value for MaxBuilderEpochMissedSlots

* make usage for MaxBuilderEpochMissedSlots more friendly

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-08-30 15:08:50 +00:00
Preston Van Loon
93e6bd7929 Flip --enable-experimental-state to opt-out. (#14398) 2024-08-29 20:42:32 +00:00
Bastin
3015eea4e3 Fix lightclient header (#14389)
* change LCUpdate to use LCHeader

* fix api struct usages

* fix api struct finalized_header

* add lightclientheader to proto structs

* fix proto usages

* fix proto usages in events

* fix uppercase field in protobuf defenition

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-29 15:57:21 +00:00
Nishant Das
2f42f7e313 Add a Tracing Wrapper Package (#14207)
* Adds a wrapper package

* Gazelle

* Add in Empty Span

* Revert It Back

* Add back reference

* Set It As Empty

* fix missing import

* remove redundant alias

* remove unused

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: Saolyn <sammy@rosso.red>
2024-08-27 20:00:53 +00:00
Preston Van Loon
a7c86a6d1b Update CONTRIBUTING.md to highlight no trivial changes (#14384) 2024-08-26 18:05:24 +00:00
Md Amaan
2399451869 Hardcoded GenesisValidatorsRoot (#14365)
* hardcoded GenesisValidatorsRoot

* added in mainnet.config

* updated desc

* added it in all testnets

* minor change

* added roots instead of empty and fn to compute byte32 from hex

* added in e2e testnet_config

* fixed test

* minor fix

* removed fn and added bytes output directly

* Add test for genesis validator root mainnet value

* removed root from minimal and testnet

* removed root

* Update CHANGELOG.md

* Fix bazel package visibility

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
2024-08-26 16:50:29 +00:00
Taranpreet26311
19d9a1915d PR to update workflow check dependencies (#14379)
* PR to update workflow check dependencies

* Updated build checkout version to v4

* Updated to go 1.23.0

* Updated lint version to v1.60.3

* Revert to 1.22.3

* Updated go to 1.23

* revert

* Updated setup-go to v5

* Update lint to 1.60.2

* Revert changes

* Update Lint version to v1.60.3

* Update lint to go 1.23.0

* Update golanci.yml to 1.23.0

* Revert and keep to golang 1.22.4

* Disable mnd

* Downgrade to current version

* Add update to go 1.26

* Update to go 1.22.6

* Update .golangci.yml to 1.22.6
2024-08-26 16:25:38 +00:00
james-prysm
261921ae4c reduce validator registration logs (#14370)
* reduce validator registration logs

* reverting a log change that's probably better as warn

* Update CHANGELOG.md

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
2024-08-23 16:47:47 +00:00
Sammy Rosso
6ad8a104dd Re-use duplicate code in aggregator (#14342)
* move shared duplicate code

* rename function
2024-08-23 16:38:49 +00:00
Jay
50e53265a1 bugfix : Removed the default value of the bootnode flag to prevent it from being overridden during testnet usage (#14357)
* Removed the default value of the bootnode flag to prevent it from being overridden during testnet usage

* bugfix for checking stringslice flag to use isSet
2024-08-23 16:28:26 +00:00
Preston Van Loon
3392fdb21d Add CHANGELOG.md and update CONTRIBUTING.md (#13673)
* Add github action requiring a diff to changelog.md.

* Update CONTRIBUTING.md with CHANGELOG.md requirements and restrictions on trivial PRs

* Update tj-actions/changed-files

* Backfill CHANGELOG.md

* Remove hashtag references to PRs

* Append PR template

* Add unreleased changes

* Update CONTRIBUTING.md

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-23 15:11:06 +00:00
Rupam Dey
dd3c9652c3 fix: replace BeaconBlockHeader in createLightClientBootstrap with LightClientHeader (#14374)
* fix: replace `BeaconBlockHeader` in `createLightClientBootstrap` with `LightClientHeader`

* minor fix in `handlers_test.go`

* check if `beacon` is `nil` instead of `header`

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-22 17:13:10 +00:00
Bastin
022a53f8f2 create light-client-updates bucket (#14266)
* create light-client-updates bucket

* Electra committee validation for aggregate and proof (#14317)

* Electra committee validation for aggregate and proof

* review

* update comments

* Refactor get local payload (#14327)

* Refactor get local payload

* Fix go lint: new line

* add lightclient db kv functions

* lightclient db tests

* move blockchain/lightclient.go to core/light-client package

* add comparison check for start and end period

* create testing/utils/lightcilent.go

* lightclient db tests

* fix imports and usages

* fix imports and usages in process_block_helpers

* fix bazel dependencies

* remove unnecessary nil check

* add more tests for lightclient kv functions

* refactor tests

* refactor kv.LightClientUpdates

* fix db to return every update that is available in the requested range

* run gazzele fix

* return empty map in case of empty db

* fix goimports errors

* goimports

* Revert "Auxiliary commit to revert individual files from aa7ce6f37cb6767cf11642b022b2ce59d42ae621"

This reverts commit 33c707f5bd164386449dc14ff27d95ad5f195161.

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: terence <terence@prysmaticlabs.com>
Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
2024-08-22 16:00:18 +00:00
Preston Van Loon
2fa3547644 Update spectests to v1.5.0-alpha.5. Copied from #14352 (#14368) 2024-08-22 14:03:48 +00:00
Rupam Dey
7c213ce161 feat: implement PayloadProof function (#14356)
* feat: implement function `PayloadProof` to calculate proof of execution payload

* remove comments

* feat: implement function to compute field roots of

* feat: implement function to compute `BeaconBlock` field roots and add tests

* fix dependencies

* check if interface implements the asserted type

* fix: lint

* replace `ok != true` with `!ok`

* remove unused parameter from `PayloadProof`

* remove test and move `PayloadProof` to `blocks/proofs.go`

* remove `PayloadProof` from `fieldtrie`

* replace `fieldtrie.ProofFromMerkleLayers` with `trie.ProofFromMerkleLayers`

* Update container/trie/sparse_merkle.go

* update dependencies

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>
2024-08-21 16:04:35 +00:00
Radosław Kapka
ed3d7d49ec Update block publishing Beacon APIs to Electra (#14361)
* Update block publishing Beacon APIs to Electra

* linter
2024-08-20 16:57:08 +00:00
Potuz
068139a78a remove unused function (#14360) 2024-08-19 16:20:57 +00:00
Nishant Das
8dd7361b6a Fix Gas Limit in Genesis (#14359)
* Fix Gas Limit in Genesis

* Comment

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-08-19 14:29:49 +00:00
Radosław Kapka
41ea1d230a Electra API struct conversions (#14339)
* Electra API conversions

* reduce function complexity

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-08-19 11:46:56 +00:00
John
9e25026519 Fix error handling to use os.IsNotExist (#14315)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-16 20:18:16 +00:00
Jun Song
9e9559df60 docs: Add consolidation_requests_root on comment (#14335)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-16 19:46:00 +00:00
kira
7a5a6c7e54 fix a deep source error (#14345)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-16 19:06:00 +00:00
Nishant Das
be317c439d Allow Block With Blobs To Be Proposed in Electra (#14353)
* Allow blobs in Electra

* Add Test

* Add Error for unknown request type

* Fix Test

* fix tests

* clean up logic

---------

Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
2024-08-16 15:39:15 +00:00
terence
f43383a3fb Proposer checks gas limit before accepting builder's bid (#14311)
* Proposer checks gas limit before accepting builder's bid

* James feedback

* Use cache
2024-08-15 21:03:11 +00:00
terence
22f6f787e1 Update spec tests to v1.5.0-alpha.4 (#14340)
* Update spec tests to v1.5.0-alpha.4

* Add Fix for Off By 1 In Consolidations

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: nisdas <nishdas93@gmail.com>
2024-08-15 05:59:41 +00:00
james-prysm
44b3986025 removing skip from test (#14343) 2024-08-14 16:33:01 +00:00
james-prysm
6cb845660a fixing electra attestation inconsistencies (#14331)
* fixing electra attestation inconsistencies

* adding dependencies

* removing helper circular dependency

* adding error

* simplifying function

* improving get committee index function

* fixing more instances of GetData().committeeIndex and fixing tests

* Update proto/prysm/v1alpha1/attestation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing feedback and fixing linting

* removing unused functions and associated tests

* fixing test error checks

* removing helpers.VerifyAttestationBitfieldLengths to reduce beaconCommitteeFromState calls

* small optimizations

* fixing linting

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-14 15:24:53 +00:00
Radosław Kapka
de2c866707 Keep read lock in BeaconState.getValidatorIndex (#14341) 2024-08-14 13:09:29 +00:00
Rupam Dey
74ddb84e0a feat: implement `ComputeFieldRootsForBlockBody` function (#14278)
* feat: (WIP)implement ``ComputeFieldRootsForBlockBody`` function to compute roots of fields in BlockBody

* calculate field roots upto ``deposits``

* add some required constants fix some errors

* implement ``ComputeFieldRootsForBlockBody`` function for all fields in ``BeaconBlockBody``

* populate `fieldRoots` based on block body version

* fix constants

* `bazel run //:gazelle -- fix`

* remove nested `if`s

* formatting 1

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>

* formatting 2

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>

* remove unrequired check

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>

* update naming 1

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>

* update function name

* add test for Phase0 block body

* update Phase0 test

* update phase0 test without setting any fields

* fix some errors in Phase0 test

* don't set fields

* add altair test

* add tests upto Electra

* fix the function for deneb and newer forks

* update dependencies

* add checks to ensure interface implements the asserted type

---------

Co-authored-by: Radosław Kapka <radoslaw.kapka@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-14 12:46:07 +00:00
Sammy Rosso
0c6a068fd5 Remove identical beacon state (#14338)
* rename + delete

* remove ref
2024-08-14 10:16:19 +00:00
Jun Song
fad92472d8 fix(tests): Fix v1alpha1/node/server_test.go (#14321)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-08-13 08:25:43 +00:00
james-prysm
2a44e8e6ec test util functions for electra field generation (#14320)
* adding some util functions for electra field generation

* linting

* adding missed linting
2024-08-12 15:36:03 +00:00
terence
e3d27f29c7 Refactor get local payload (#14327)
* Refactor get local payload

* Fix go lint: new line
2024-08-09 17:04:43 +00:00
Radosław Kapka
e011f05403 Electra committee validation for aggregate and proof (#14317)
* Electra committee validation for aggregate and proof

* review

* update comments
2024-08-08 16:32:19 +00:00
Potuz
b8cd77945d Move slasher handling down the pipeline (#14322) 2024-08-08 13:51:40 +00:00
Radosław Kapka
9a7f521f8a Fix state upgrade log (#14316) 2024-08-07 15:32:54 +00:00
Preston Van Loon
102f94f914 Clean up: Deduplicate fork spec tests pasta (#14289)
* Deduplicate code in spectest operations

Deduplicate code in blockchain core packages.

* Deduplicate code in blockchain core packages.
2024-08-07 15:16:37 +00:00
james-prysm
0c0a497651 engine_getPayloadBodiesByRangeV1 - fix, adding hexutil encoding on request parameters (#14314)
* adding hexutil encoding on request parameters

* fix for test

* fixing more tests

* deepsource fix
2024-08-07 15:05:24 +00:00
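The engine API expects the start and count parameters of engine_getPayloadBodiesByRangeV1 as hex-encoded QUANTITY strings, which is what the hexutil change above addresses. A minimal sketch of that encoding using go-ethereum's hexutil package (the request assembly here is illustrative, not Prysm's actual client code):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	start, count := uint64(100), uint64(8)
	// engine_getPayloadBodiesByRangeV1 takes QUANTITY-encoded params
	// ("0x64", "0x8"), not plain JSON numbers.
	params := []interface{}{hexutil.EncodeUint64(start), hexutil.EncodeUint64(count)}
	fmt.Println("engine_getPayloadBodiesByRangeV1 params:", params)
}
```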
Sammy Rosso
e0785a8939 HTTP endpoint for GetValidatorActiveSetChanges (#14264)
* add GetValidatorActiveSetChanges

* fix linter

* fix errors

* James' review

* use stater

* fix merge conflict errors

* remove validator from func names

* rename util funcs
2024-08-06 11:56:43 +00:00
Potuz
af098e737e Check locally for min-bid and min-bid-difference (#14205)
* Check locally for min-bid and min-bid-difference

* fix tests

* Terence's fix

Co-authored-by: terence <terence@prysmaticlabs.com>

* Terence's review

* fix tests

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2024-08-05 14:20:26 +00:00
Potuz
1e4ede5585 Reduce cognitive complexity of ReceiveBlock (#14296)
* Reduce cognitive complexity of ReceiveBlock

* Remove stray line

Co-authored-by: terence <terence@prysmaticlabs.com>

---------

Co-authored-by: terence <terence@prysmaticlabs.com>
2024-08-05 12:44:49 +00:00
Sammy Rosso
fb2620364a HTTP endpoint for GetChainHead (#14262)
* add getChainHead endpoint

* James' review

* Radek' review

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-05 08:49:37 +00:00
Ryan
68b38b6666 remove broken link (#14291)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-02 21:36:09 +00:00
Jun Song
ff3e0856a1 fix(tests): Correct misleading variable names and expressions in test files (#14292)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-02 21:32:53 +00:00
Potuz
85f334b663 Return parent root from forkchoice (#14293) 2024-08-02 14:36:37 +00:00
Potuz
10f520accb process Electra operations in the right order (#14294)
* process Electra operations in the right order

* godoc
2024-08-02 14:30:52 +00:00
Sammy Rosso
836608537e HTTP endpoint for GetValidatorParticipation (#14261)
* add endpoint

* remove canonicalFetcher

* Add replayerBuilder to coreService

* fix endpoint template

* fix string query params

* gaz

* fix linter

* test fix

* Radek' review

* remove unused request struct + gaz

* linter

* gaz

---------

Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-08-02 13:55:17 +00:00
Radosław Kapka
13e09c58f6 Committee-aware attestation packing (#14245)
* initial algorithm

* ready for review

* feature flag

* typo

* review

* comment fix

* fix TestProposer_sort_DifferentCommittees

* flag usage
2024-08-02 11:53:43 +00:00
Potuz
600ca08aa8 Use the beacon block root to verify the prestate (#14290) 2024-08-01 20:22:37 +00:00
james-prysm
0ed74b3c4a removing explicit deadlines on push proposer settings (#14285)
* removing deadlines

* fixing mock interface implementation
2024-07-31 18:28:50 +00:00
Potuz
7c69a9aa1c Refactor CommitteeAssignments to get all committees at once (#14284) 2024-07-31 16:01:19 +00:00
Potuz
c50cfb044a Get all beacon committees at once (#14282)
* Get all beacon committees at once

* Update beacon-chain/core/helpers/beacon_committee.go

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-07-31 12:52:23 +00:00
ethfanWilliam
38d4e179ba chore: fix unfound image (#14167)
* Remove Feature Flag From Prater (#12082)

* Use Epoch boundary cache to retrieve balances (#12083)

* Use Epoch boundary cache to retrieve balances

* save boundary states before inserting to forkchoice

* move up last block save

* remove boundary checks on balances

* fix ordering

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>

* chore: fix unfound image

* fix link

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-07-31 12:10:18 +00:00
shangchengbabaiban
be80728320 chore: fix some comments (#14270)
Signed-off-by: shangchengbabaiban <shuang.cui@live.cn>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-07-31 11:06:15 +00:00
james-prysm
09028033c0 adding corrected returns under handle error (#14280) 2024-07-30 22:02:25 +00:00
james-prysm
52c036c3ab missed adding electra process epoch on state replay - devnet 1 (#14272)
* missed adding electra process epoch on state replay

* abstracting common functionality as to not forget in processing

* removing unnecessary error check

* breaking out core process slots logic out

* fixing small bug in epoch processing

* adding small log to fix linting and make use of logrus in the package

* adding some unit tests for process epoch

* potuz's review feedback

* reversing processing order based on feedback
2024-07-30 20:52:55 +00:00
james-prysm
2fc7cdeba7 Push proposer settings every slot (#14155)
* test push settings every slot

* updating comments

* changing design a little bit based on feedback

* adding corrected deadline
2024-07-30 19:30:56 +00:00
james-prysm
6f7976766d moving cloner functions to beacon_block.go (#14265)
* moving cloner functions for beacon block and adding fuzzing tests

* fixing test
2024-07-29 15:13:21 +00:00
781 changed files with 43032 additions and 24855 deletions

View File

@@ -10,6 +10,7 @@
in review.
4. Note that PRs updating dependencies and new Go versions are not accepted.
Please file an issue instead.
5. A changelog entry is required for user facing issues.
-->
**What type of PR is this?**
@@ -28,3 +29,9 @@
Fixes #
**Other notes for review**
**Acknowledgements**
- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have made an appropriate entry to [CHANGELOG.md](https://github.com/prysmaticlabs/prysm/blob/develop/CHANGELOG.md).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.

View File

@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Go mod tidy checker
id: gomodtidy
@@ -27,11 +27,11 @@ jobs:
GO111MODULE: on
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up Go 1.22
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: '1.22.3'
go-version: '1.22.6'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
@@ -43,16 +43,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up Go 1.22
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: '1.22.3'
go-version: '1.22.6'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@v5
with:
version: v1.55.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
@@ -62,13 +62,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.x
uses: actions/setup-go@v2
uses: actions/setup-go@v4
with:
go-version: '1.22.3'
go-version: '1.22.6'
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Get dependencies
run: |

View File

@@ -6,7 +6,7 @@ run:
- proto
- tools/analyzers
timeout: 10m
go: '1.22.3'
go: '1.22.6'
linters:
enable-all: true

View File

@@ -26,7 +26,6 @@ approval_rules:
only_changed_files:
paths:
- "*pb.go"
- "*pb.gw.go"
- "*.bazel"
options:
ignore_commits_by:
@@ -69,7 +68,6 @@ approval_rules:
changed_files:
ignore:
- "*pb.go"
- "*pb.gw.go"
- "*.bazel"
options:
ignore_commits_by:

View File

@@ -55,13 +55,6 @@ alias(
visibility = ["//visibility:public"],
)
# Protobuf gRPC gateway compiler
alias(
name = "grpc_gateway_proto_compiler",
actual = "@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
visibility = ["//visibility:public"],
)
gometalinter(
name = "gometalinter",
config = "//:.gometalinter.json",

CHANGELOG.md (new file, 2802 changed lines)

File diff suppressed because it is too large

View File

@@ -6,6 +6,9 @@ Excited by our work and want to get involved in building out our sharding releas
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PRs after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ) drop us a line there if you want to get more involved or have any questions on our implementation!
> [!IMPORTANT]
> Please, **do not send pull requests for trivial changes**, such as typos, these will be rejected. These types of pull requests incur a cost to reviewers and do not provide much value to the project. If you are unsure, please open an issue first to discuss the change.
## Contribution Steps
**1. Set up Prysm following the instructions in README.md.**
@@ -120,15 +123,19 @@ $ git push myrepo feature-in-progress-branch
Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
**16. Create a pull request.**
**16. Add an entry to CHANGELOG.md.**
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls.
If your change is user facing, you must include a CHANGELOG.md entry. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
**17. Respond to comments by Core Contributors.**
**17. Create a pull request.**
Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
**18. Respond to comments by Core Contributors.**
Core Contributors may ask questions and request that you make edits. If you set notifications at the top of the page to “not watching,” you will still be notified by email whenever someone comments on the page of a pull request you have created. If you are asked to modify your pull request, repeat steps 8 through 15, then leave a comment to notify the Core Contributors that the pull request is ready for further review.
**18. If the number of commits becomes excessive, you may be asked to squash your commits.**
**19. If the number of commits becomes excessive, you may be asked to squash your commits.**
You can do this with an interactive rebase. Start by running the following command to determine the commit that is the base of your branch...
@@ -136,7 +143,7 @@ Core Contributors may ask questions and request that you make edits. If you set
$ git merge-base feature-in-progress-branch prysm/master
```
**19. The previous command will return a commit-hash that you should use in the following command.**
**20. The previous command will return a commit-hash that you should use in the following command.**
```
$ git rebase -i commit-hash
@@ -160,13 +167,30 @@ squash hash add a feature
Save and close the file, then a commit command will appear in the terminal that squashes the smaller commits into one. Check to be sure the commit message accurately reflects your changes and then hit enter to execute it.
**20. Update your pull request with the following command.**
**21. Update your pull request with the following command.**
```
$ git push myrepo feature-in-progress-branch -f
```
**21. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
**22. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
## Maintaining CHANGELOG.md
This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/).
All PRs with user facing changes should have an entry in the CHANGELOG.md file and the change should be categorized in the appropriate category within the "Unreleased" section. The categories are:
- `Added` for new features.
- `Changed` for changes in existing functionality.
- `Deprecated` for soon-to-be removed features.
- `Removed` for now removed features.
- `Fixed` for any bug fixes.
- `Security` in case of vulnerabilities. Please see the [Security Policy](SECURITY.md) for responsible disclosure before adding a change with this category.
### Releasing
When a new release is made, the "Unreleased" section should be moved to a new section with the release version and the current date. Then a new "Unreleased" section is made at the top of the file with the categories listed above.
## Contributor Responsibilities

View File

@@ -227,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.3"
consensus_spec_version = "v1.5.0-alpha.6"
bls_test_version = "v0.1.1"
@@ -243,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-+byv+GUOQytex5GgtjBGVoNDseJZbiBdAjEtlgCbjEo=",
integrity = "sha256-M7u/Ot/Vzorww+dFbHp0cxLyM2mezJjijCzq+LY3uvs=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -259,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-JJUy/jT1h3kGQkinTuzL7gMOA1+qgmPgJXVrYuH63Cg=",
integrity = "sha256-deOSeLRsmHXvkRp8n2bs3HXdkGUJWWqu8KFM/QABbZg=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -275,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-T2VM4Qd0SwgGnTjWxjOX297DqEsovO9Ueij1UEJy48Y=",
integrity = "sha256-Zz7YCf6XVf57nzSEGq9ToflJFHM0lAGwhd18l9Rf3hA=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -290,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-OP9BCBcQ7i+93bwj7ktY8pZ5uWsGjgTe4XTp7BDhX+I=",
integrity = "sha256-BoXckDxXnDcEmAjg/dQgf/tLiJsb6CT0aZvmWHFijrY=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -21,6 +21,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
@@ -28,7 +29,6 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -19,11 +19,11 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
const (
@@ -146,7 +146,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
u := c.baseURL.ResolveReference(&url.URL{Path: path})
span.AddAttributes(trace.StringAttribute("url", u.String()),
span.SetAttributes(trace.StringAttribute("url", u.String()),
trace.StringAttribute("method", method))
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
@@ -259,7 +259,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
defer span.End()
span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
span.SetAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
if len(svr) == 0 {
err := errors.Wrap(errMalformedRequest, "empty validator registration list")
@@ -278,7 +278,11 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
}
_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
return err
if err != nil {
return err
}
log.WithField("num_registrations", len(svr)).Info("successfully registered validator(s) on builder")
return nil
}
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")

View File

@@ -1,43 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"gateway.go",
"log.go",
"modifiers.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/gateway",
visibility = [
"//beacon-chain:__subpackages__",
"//validator:__subpackages__",
],
deps = [
"//api/server/middleware:go_default_library",
"//runtime:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//connectivity:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_grpc//credentials/insecure:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["gateway_test.go"],
embed = [":go_default_library"],
deps = [
"//cmd/beacon-chain/flags:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)

View File

@@ -1,212 +0,0 @@
// Package gateway defines a grpc-gateway server that serves HTTP-JSON traffic and acts a proxy between HTTP and gRPC.
package gateway
import (
"context"
"fmt"
"net"
"net/http"
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
"github.com/prysmaticlabs/prysm/v5/runtime"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
var _ runtime.Service = (*Gateway)(nil)
// PbMux serves grpc-gateway requests for selected patterns using registered protobuf handlers.
type PbMux struct {
Registrations []PbHandlerRegistration // Protobuf registrations to be registered in Mux.
Patterns []string // URL patterns that will be handled by Mux.
Mux *gwruntime.ServeMux // The router that will be used for grpc-gateway requests.
}
// PbHandlerRegistration is a function that registers a protobuf handler.
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
// MuxHandler is a function that implements the mux handler functionality.
type MuxHandler func(
h http.HandlerFunc,
w http.ResponseWriter,
req *http.Request,
)
// Config parameters for setting up the gateway service.
type config struct {
maxCallRecvMsgSize uint64
remoteCert string
gatewayAddr string
remoteAddr string
allowedOrigins []string
muxHandler MuxHandler
pbHandlers []*PbMux
router *mux.Router
timeout time.Duration
}
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
type Gateway struct {
cfg *config
conn *grpc.ClientConn
server *http.Server
cancel context.CancelFunc
ctx context.Context
startFailure error
}
// New returns a new instance of the Gateway.
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
g := &Gateway{
ctx: ctx,
cfg: &config{},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
g.cfg.router = mux.NewRouter()
}
return g, nil
}
// Start the gateway service.
func (g *Gateway) Start() {
ctx, cancel := context.WithCancel(g.ctx)
g.cancel = cancel
conn, err := g.dial(ctx, "tcp", g.cfg.remoteAddr)
if err != nil {
log.WithError(err).Error("Failed to connect to gRPC server")
g.startFailure = err
return
}
g.conn = conn
for _, h := range g.cfg.pbHandlers {
for _, r := range h.Registrations {
if err := r(ctx, h.Mux, g.conn); err != nil {
log.WithError(err).Error("Failed to register handler")
g.startFailure = err
return
}
}
for _, p := range h.Patterns {
g.cfg.router.PathPrefix(p).Handler(h.Mux)
}
}
corsMux := middleware.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
if g.cfg.muxHandler != nil {
g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.cfg.muxHandler(corsMux.ServeHTTP, w, r)
})
}
g.server = &http.Server{
Addr: g.cfg.gatewayAddr,
Handler: corsMux,
ReadHeaderTimeout: time.Second,
}
go func() {
log.WithField("address", g.cfg.gatewayAddr).Info("Starting gRPC gateway")
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to start gRPC gateway")
g.startFailure = err
return
}
}()
}
// Status of grpc gateway. Returns an error if this service is unhealthy.
func (g *Gateway) Status() error {
if g.startFailure != nil {
return g.startFailure
}
if s := g.conn.GetState(); s != connectivity.Ready {
return fmt.Errorf("grpc server is %s", s)
}
return nil
}
// Stop the gateway with a graceful shutdown.
func (g *Gateway) Stop() error {
if g.server != nil {
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
defer shutdownCancel()
if err := g.server.Shutdown(shutdownCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
log.Warn("Existing connections terminated")
} else {
log.WithError(err).Error("Failed to gracefully shut down server")
}
}
}
if g.cancel != nil {
g.cancel()
}
return nil
}
// dial the gRPC server.
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
switch network {
case "tcp":
return g.dialTCP(ctx, addr)
case "unix":
return g.dialUnix(ctx, addr)
default:
return nil, fmt.Errorf("unsupported network type %q", network)
}
}
// dialTCP creates a client connection via TCP.
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
var security grpc.DialOption
if len(g.cfg.remoteCert) > 0 {
creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
if err != nil {
return nil, err
}
security = grpc.WithTransportCredentials(creds)
} else {
// Use insecure credentials when there's no remote cert provided.
security = grpc.WithTransportCredentials(insecure.NewCredentials())
}
opts := []grpc.DialOption{
security,
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
// dialUnix creates a client connection via a unix domain socket.
// "addr" must be a valid path to the socket.
func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn, error) {
d := func(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout("unix", addr, timeout)
}
f := func(ctx context.Context, addr string) (net.Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
return d(addr, time.Until(deadline))
}
return d(addr, 0)
}
opts := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(f),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
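For orientation, a minimal usage sketch of the gateway above, illustrative only and not part of this changeset: the function name, addresses, message-size limit, and handler slice are placeholders, and the caller is assumed to supply its own grpc-gateway registrations via the functional options defined in this package.

// exampleStartGateway is a hypothetical sketch showing how the package's options
// can be combined to build and start a Gateway. All values are placeholders.
func exampleStartGateway(ctx context.Context, handlers []*PbMux) error {
	g, err := New(ctx,
		WithRemoteAddr("127.0.0.1:4000"),  // gRPC server to proxy to (placeholder)
		WithGatewayAddr("127.0.0.1:3500"), // HTTP listen address (placeholder)
		WithAllowedOrigins([]string{"*"}),
		WithMaxCallRecvMsgSize(1<<22),
		WithPbHandlers(handlers),
	)
	if err != nil {
		return err
	}
	g.Start() // non-blocking: ListenAndServe runs in a goroutine
	// Health can be polled afterwards via g.Status().
	return nil
}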


@@ -1,107 +0,0 @@
package gateway
import (
"context"
"flag"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
func TestGateway_Customized(t *testing.T) {
r := mux.NewRouter()
cert := "cert"
origins := []string{"origin"}
size := uint64(100)
opts := []Option{
WithRouter(r),
WithRemoteCert(cert),
WithAllowedOrigins(origins),
WithMaxCallRecvMsgSize(size),
WithMuxHandler(func(
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
assert.Equal(t, r, g.cfg.router)
assert.Equal(t, cert, g.cfg.remoteCert)
require.Equal(t, 1, len(g.cfg.allowedOrigins))
assert.Equal(t, origins[0], g.cfg.allowedOrigins[0])
assert.Equal(t, size, g.cfg.maxCallRecvMsgSize)
}
func TestGateway_StartStop(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
WithMuxHandler(func(
_ http.HandlerFunc,
_ http.ResponseWriter,
_ *http.Request,
) {
}),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
g.Start()
go func() {
require.LogsContain(t, hook, "Starting gRPC gateway")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
require.NoError(t, err)
}
func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
rpcHost := ctx.String(flags.RPCHost.Name)
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
opts := []Option{
WithGatewayAddr(gatewayAddress),
WithRemoteAddr(selfAddress),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
writer := httptest.NewRecorder()
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}


@@ -1,5 +0,0 @@
package gateway
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "gateway")


@@ -1,30 +0,0 @@
package gateway
import (
"context"
"net/http"
"strconv"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/protobuf/proto"
)
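// HttpResponseModifier reads the "x-http-code" value from the gRPC server metadata,
// writes it as the HTTP status code, and removes the metadata from the outgoing HTTP headers.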
func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
md, ok := gwruntime.ServerMetadataFromContext(ctx)
if !ok {
return nil
}
// set http status code
if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
code, err := strconv.Atoi(vals[0])
if err != nil {
return err
}
// delete the headers to not expose any grpc-metadata in http response
delete(md.HeaderMD, "x-http-code")
delete(w.Header(), "Grpc-Metadata-X-Http-Code")
w.WriteHeader(code)
}
return nil
}
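A hedged wiring sketch for the modifier above: it is registered as a forward-response option on the grpc-gateway mux, and an individual gRPC handler opts into a custom status code by setting the "x-http-code" metadata key. The function name and the status value 202 are illustrative and not part of this changeset.

// exampleMuxWithStatusOverride is a hypothetical sketch: it builds a grpc-gateway
// ServeMux that runs HttpResponseModifier on every forwarded response.
func exampleMuxWithStatusOverride() *gwruntime.ServeMux {
	return gwruntime.NewServeMux(gwruntime.WithForwardResponseOption(HttpResponseModifier))
}

// A gRPC handler would then opt in with something like:
//   _ = grpc.SetHeader(ctx, metadata.Pairs("x-http-code", "202"))
// which HttpResponseModifier turns into w.WriteHeader(202) on the HTTP side.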


@@ -1,79 +0,0 @@
package gateway
import (
"time"
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)
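// Option is a gateway functional parameter type.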
type Option func(g *Gateway) error
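// WithPbHandlers sets the protobuf handler registrations served by the gateway.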
func WithPbHandlers(handlers []*PbMux) Option {
return func(g *Gateway) error {
g.cfg.pbHandlers = handlers
return nil
}
}
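// WithMuxHandler sets the handler that wraps the gateway's router for every incoming request.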
func WithMuxHandler(m MuxHandler) Option {
return func(g *Gateway) error {
g.cfg.muxHandler = m
return nil
}
}
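// WithGatewayAddr sets the host:port address on which the gateway's HTTP server listens.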
func WithGatewayAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.gatewayAddr = addr
return nil
}
}
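// WithRemoteAddr sets the address of the gRPC server that the gateway proxies to.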
func WithRemoteAddr(addr string) Option {
return func(g *Gateway) error {
g.cfg.remoteAddr = addr
return nil
}
}
// WithRouter allows adding a custom mux router to the gateway.
func WithRouter(r *mux.Router) Option {
return func(g *Gateway) error {
g.cfg.router = r
return nil
}
}
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
func WithAllowedOrigins(origins []string) Option {
return func(g *Gateway) error {
g.cfg.allowedOrigins = origins
return nil
}
}
// WithRemoteCert allows adding a custom certificate to the gateway.
func WithRemoteCert(cert string) Option {
return func(g *Gateway) error {
g.cfg.remoteCert = cert
return nil
}
}
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
func WithMaxCallRecvMsgSize(size uint64) Option {
return func(g *Gateway) error {
g.cfg.maxCallRecvMsgSize = size
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(seconds uint64) Option {
return func(g *Gateway) error {
g.cfg.timeout = time.Second * time.Duration(seconds)
gwruntime.DefaultContextTimeout = time.Second * time.Duration(seconds)
return nil
}
}


@@ -22,9 +22,7 @@ go_test(
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
],
)


@@ -2,8 +2,6 @@ package grpc
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
@@ -81,16 +79,3 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
}
return parent
}
// AppendCustomErrorHeader sets a CustomErrorMetadataKey gRPC header on the passed in context,
// using the passed in error data as the header's value. The data is serialized as JSON.
func AppendCustomErrorHeader(ctx context.Context, errorData interface{}) error {
j, err := json.Marshal(errorData)
if err != nil {
return fmt.Errorf("could not marshal error data into JSON: %w", err)
}
if err := grpc.SetHeader(ctx, metadata.Pairs(CustomErrorMetadataKey, string(j))); err != nil {
return fmt.Errorf("could not set custom error header: %w", err)
}
return nil
}


@@ -2,15 +2,11 @@ package grpc
import (
"context"
"encoding/json"
"strings"
"testing"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
@@ -62,17 +58,3 @@ func TestAppendHeaders(t *testing.T) {
assert.Equal(t, "value=1", md.Get("first")[0])
})
}
func TestAppendCustomErrorHeader(t *testing.T) {
stream := &runtime.ServerTransportStream{}
ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
data := &customErrorData{Message: "foo"}
require.NoError(t, AppendCustomErrorHeader(ctx, data))
// The stream used in test setup sets the metadata key in lowercase.
value, ok := stream.Header()[strings.ToLower(CustomErrorMetadataKey)]
require.Equal(t, true, ok, "Failed to retrieve custom error metadata value")
expected, err := json.Marshal(data)
require.NoError(t, err)
assert.Equal(t, string(expected), value[0])
}


@@ -0,0 +1,31 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"options.go",
"server.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/httprest",
visibility = ["//visibility:public"],
deps = [
"//api/server/middleware:go_default_library",
"//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["server_test.go"],
embed = [":go_default_library"],
deps = [
"//cmd/beacon-chain/flags:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)


@@ -0,0 +1,5 @@
package httprest
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "httprest")


@@ -0,0 +1,44 @@
package httprest
import (
"time"
"net/http"
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
)
// Option is an HTTP REST server functional parameter type.
type Option func(g *Server) error
// WithMiddlewares sets the list of middlewares to be applied on routes.
func WithMiddlewares(mw []middleware.Middleware) Option {
return func(g *Server) error {
g.cfg.middlewares = mw
return nil
}
}
// WithHTTPAddr sets the full address (host and port) of the server.
func WithHTTPAddr(addr string) Option {
return func(g *Server) error {
g.cfg.httpAddr = addr
return nil
}
}
// WithRouter sets the internal router of the server. This option is required.
func WithRouter(r *http.ServeMux) Option {
return func(g *Server) error {
g.cfg.router = r
return nil
}
}
// WithTimeout allows changing the timeout value for API calls.
func WithTimeout(duration time.Duration) Option {
return func(g *Server) error {
g.cfg.timeout = duration
return nil
}
}


@@ -0,0 +1,101 @@
package httprest
import (
"context"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
"github.com/prysmaticlabs/prysm/v5/runtime"
)
var _ runtime.Service = (*Server)(nil)
// Config parameters for setting up the http-rest service.
type config struct {
httpAddr string
middlewares []middleware.Middleware
router http.Handler
timeout time.Duration
}
// Server serves HTTP traffic.
type Server struct {
cfg *config
server *http.Server
cancel context.CancelFunc
ctx context.Context
startFailure error
}
// New returns a new instance of the Server.
func New(ctx context.Context, opts ...Option) (*Server, error) {
g := &Server{
ctx: ctx,
cfg: &config{},
}
for _, opt := range opts {
if err := opt(g); err != nil {
return nil, err
}
}
if g.cfg.router == nil {
return nil, errors.New("router option not configured")
}
var handler http.Handler
defaultReadHeaderTimeout := time.Second
handler = middleware.MiddlewareChain(g.cfg.router, g.cfg.middlewares)
if g.cfg.timeout > 0*time.Second {
defaultReadHeaderTimeout = g.cfg.timeout
handler = http.TimeoutHandler(handler, g.cfg.timeout, "request timed out")
}
g.server = &http.Server{
Addr: g.cfg.httpAddr,
Handler: handler,
ReadHeaderTimeout: defaultReadHeaderTimeout,
}
return g, nil
}
// Start the HTTP REST service.
func (g *Server) Start() {
g.ctx, g.cancel = context.WithCancel(g.ctx)
go func() {
log.WithField("address", g.cfg.httpAddr).Info("Starting HTTP server")
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
log.WithError(err).Error("Failed to start HTTP server")
g.startFailure = err
return
}
}()
}
// Status of the HTTP server. Returns an error if this service is unhealthy.
func (g *Server) Status() error {
if g.startFailure != nil {
return g.startFailure
}
return nil
}
// Stop the HTTP server with a graceful shutdown.
func (g *Server) Stop() error {
if g.server != nil {
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
defer shutdownCancel()
if err := g.server.Shutdown(shutdownCtx); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
log.Warn("Existing connections terminated")
} else {
log.WithError(err).Error("Failed to gracefully shut down server")
}
}
}
if g.cancel != nil {
g.cancel()
}
return nil
}
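A minimal usage sketch of the server above, illustrative only and not part of this changeset: it assumes a plain http.ServeMux and the query-normalization middleware from api/server/middleware, and the function name, route, listen address, and timeout are placeholders.

// exampleStartServer is a hypothetical sketch showing how the options in this
// package compose into a running server. All values are placeholders.
func exampleStartServer(ctx context.Context) error {
	router := http.NewServeMux()
	router.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	s, err := New(ctx,
		WithHTTPAddr("127.0.0.1:3500"), // placeholder listen address
		WithRouter(router),
		WithTimeout(10*time.Second),
		WithMiddlewares([]middleware.Middleware{middleware.NormalizeQueryValuesHandler}),
	)
	if err != nil {
		return err
	}
	s.Start() // non-blocking: ListenAndServe runs in a goroutine
	return nil
}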


@@ -0,0 +1,71 @@
package httprest
import (
"context"
"flag"
"fmt"
"net"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"github.com/urfave/cli/v2"
)
func TestServer_StartStop(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
port := ctx.Int(flags.HTTPServerPort.Name)
portStr := fmt.Sprintf("%d", port) // Convert port to string
host := ctx.String(flags.HTTPServerHost.Name)
address := net.JoinHostPort(host, portStr)
handler := http.NewServeMux()
opts := []Option{
WithHTTPAddr(address),
WithRouter(handler),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
g.Start()
go func() {
require.LogsContain(t, hook, "Starting HTTP server")
require.LogsDoNotContain(t, hook, "Starting API middleware")
}()
err = g.Stop()
require.NoError(t, err)
}
func TestServer_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
ctx := cli.NewContext(&app, set, nil)
handler := http.NewServeMux()
port := ctx.Int(flags.HTTPServerPort.Name)
portStr := fmt.Sprintf("%d", port) // Convert port to string
host := ctx.String(flags.HTTPServerHost.Name)
address := net.JoinHostPort(host, portStr)
opts := []Option{
WithHTTPAddr(address),
WithRouter(handler),
}
g, err := New(context.Background(), opts...)
require.NoError(t, err)
writer := httptest.NewRecorder()
g.cfg.router.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
assert.Equal(t, http.StatusNotFound, writer.Code)
}


@@ -8,10 +8,7 @@ go_library(
],
importpath = "github.com/prysmaticlabs/prysm/v5/api/server/middleware",
visibility = ["//visibility:public"],
deps = [
"@com_github_gorilla_mux//:go_default_library",
"@com_github_rs_cors//:go_default_library",
],
deps = ["@com_github_rs_cors//:go_default_library"],
)
go_test(


@@ -5,10 +5,11 @@ import (
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
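// Middleware is a function that wraps an http.Handler with additional request processing.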
type Middleware func(http.Handler) http.Handler
// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -21,7 +22,7 @@ func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
}
// CorsHandler sets the CORS settings on API endpoints.
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
func CorsHandler(allowOrigins []string) Middleware {
c := cors.New(cors.Options{
AllowedOrigins: allowOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
@@ -34,7 +35,7 @@ func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
}
// ContentTypeHandler checks the request for appropriate media types, otherwise returning an http.StatusUnsupportedMediaType error.
func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
func ContentTypeHandler(acceptedMediaTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// skip the GET request
@@ -67,7 +68,7 @@ func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
}
// AcceptHeaderHandler checks whether the client's response preference (Accept header) can be handled.
func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
func AcceptHeaderHandler(serverAcceptedTypes []string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
acceptHeader := r.Header.Get("Accept")
@@ -110,3 +111,15 @@ func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
})
}
}
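// MiddlewareChain wraps the given handler with the provided middlewares, applying them
// so that the first middleware in the slice becomes the outermost wrapper.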
func MiddlewareChain(h http.Handler, mw []Middleware) http.Handler {
if len(mw) < 1 {
return h
}
wrapped := h
for i := len(mw) - 1; i >= 0; i-- {
wrapped = mw[i](wrapped)
}
return wrapped
}
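A short usage sketch for MiddlewareChain, assuming JSON is the only accepted media type (an illustrative choice, not part of this changeset). Because the first middleware in the slice ends up outermost, query normalization runs before the content-type and accept checks.

// exampleChain is a hypothetical sketch composing the middlewares defined above.
func exampleChain(h http.Handler) http.Handler {
	return MiddlewareChain(h, []Middleware{
		NormalizeQueryValuesHandler,                       // outermost: runs first
		ContentTypeHandler([]string{"application/json"}),  // rejects unsupported request Content-Types
		AcceptHeaderHandler([]string{"application/json"}), // innermost: checks the Accept header
	})
}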


@@ -6,6 +6,7 @@ go_library(
"block.go",
"conversions.go",
"conversions_block.go",
"conversions_lightclient.go",
"conversions_state.go",
"endpoints_beacon.go",
"endpoints_blob.go",
@@ -33,6 +34,9 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",


@@ -317,6 +317,96 @@ type BlindedBeaconBlockBodyDeneb struct {
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type SignedBeaconBlockContentsElectra struct {
SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type BeaconBlockContentsElectra struct {
Block *BeaconBlockElectra `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type SignedBeaconBlockElectra struct {
Message *BeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockElectra{}
func (s *SignedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BeaconBlockBodyElectra `json:"body"`
}
type BeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadElectra `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
type BlindedBeaconBlockElectra struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyElectra `json:"body"`
}
type SignedBlindedBeaconBlockElectra struct {
Message *BlindedBeaconBlockElectra `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockElectra{}
func (s *SignedBlindedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockElectra) SigString() string {
return s.Signature
}
type BlindedBeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
type SignedBeaconBlockHeaderContainer struct {
Header *SignedBeaconBlockHeader `json:"header"`
Root string `json:"root"`
@@ -426,6 +516,8 @@ type ExecutionPayloadDeneb struct {
ExcessBlobGas string `json:"excess_blob_gas"`
}
type ExecutionPayloadElectra = ExecutionPayloadDeneb
type ExecutionPayloadHeaderDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
@@ -445,3 +537,11 @@ type ExecutionPayloadHeaderDeneb struct {
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
type ExecutionPayloadHeaderElectra = ExecutionPayloadHeaderDeneb
type ExecutionRequests struct {
Deposits []*DepositRequest `json:"deposits"`
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
Consolidations []*ConsolidationRequest `json:"consolidations"`
}


@@ -340,6 +340,42 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
}, nil
}
func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
sig, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SignedAggregateAttestationAndProofElectra{
Message: msg,
Signature: sig,
}, nil
}
func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttestationAndProofElectra, error) {
aggIndex, err := strconv.ParseUint(a.AggregatorIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AggregatorIndex")
}
agg, err := a.Aggregate.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Aggregate")
}
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
return &eth.AggregateAttestationAndProofElectra{
AggregatorIndex: primitives.ValidatorIndex(aggIndex),
Aggregate: agg,
SelectionProof: proof,
}, nil
}
func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
@@ -369,6 +405,41 @@ func AttFromConsensus(a *eth.Attestation) *Attestation {
}
}
func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
return nil, server.NewDecodeError(err, "AggregationBits")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
committeeBits, err := hexutil.Decode(a.CommitteeBits)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeBits")
}
return &eth.AttestationElectra{
AggregationBits: aggBits,
Data: data,
Signature: sig,
CommitteeBits: committeeBits,
}, nil
}
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
return &AttestationElectra{
AggregationBits: hexutil.Encode(a.AggregationBits),
Data: AttDataFromConsensus(a.Data),
Signature: hexutil.Encode(a.Signature),
CommitteeBits: hexutil.Encode(a.CommitteeBits),
}
}
func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
slot, err := strconv.ParseUint(a.Slot, 10, 64)
if err != nil {
@@ -623,6 +694,18 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
return &eth.AttesterSlashing{Attestation_1: att1, Attestation_2: att2}, nil
}
func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
att1, err := s.Attestation1.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation1")
}
att2, err := s.Attestation2.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Attestation2")
}
return &eth.AttesterSlashingElectra{Attestation_1: att1, Attestation_2: att2}, nil
}
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
indices := make([]uint64, len(a.AttestingIndices))
var err error
@@ -648,6 +731,31 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
}, nil
}
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
indices := make([]uint64, len(a.AttestingIndices))
var err error
for i, ix := range a.AttestingIndices {
indices[i], err = strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
}
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.IndexedAttestationElectra{
AttestingIndices: indices,
Data: data,
Signature: sig,
}, nil
}
func WithdrawalsFromConsensus(ws []*enginev1.Withdrawal) []*Withdrawal {
result := make([]*Withdrawal, len(ws))
for i, w := range ws {
@@ -665,6 +773,126 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
}
}
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
result := make([]*WithdrawalRequest, len(ws))
for i, w := range ws {
result[i] = WithdrawalRequestFromConsensus(w)
}
return result
}
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
return &WithdrawalRequest{
SourceAddress: hexutil.Encode(w.SourceAddress),
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
Amount: fmt.Sprintf("%d", w.Amount),
}
}
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "ValidatorPubkey")
}
amount, err := strconv.ParseUint(w.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
return &enginev1.WithdrawalRequest{
SourceAddress: src,
ValidatorPubkey: pubkey,
Amount: amount,
}, nil
}
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
result := make([]*ConsolidationRequest, len(cs))
for i, c := range cs {
result[i] = ConsolidationRequestFromConsensus(c)
}
return result
}
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
return &ConsolidationRequest{
SourceAddress: hexutil.Encode(c.SourceAddress),
SourcePubkey: hexutil.Encode(c.SourcePubkey),
TargetPubkey: hexutil.Encode(c.TargetPubkey),
}
}
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourceAddress")
}
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "SourcePubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "TargetPubkey")
}
return &enginev1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: srcPubkey,
TargetPubkey: targetPubkey,
}, nil
}
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
result := make([]*DepositRequest, len(ds))
for i, d := range ds {
result[i] = DepositRequestFromConsensus(d)
}
return result
}
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
return &DepositRequest{
Pubkey: hexutil.Encode(d.Pubkey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Index: fmt.Sprintf("%d", d.Index),
}
}
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "Pubkey")
}
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
}
amount, err := strconv.ParseUint(d.Amount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Amount")
}
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
index, err := strconv.ParseUint(d.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}
return &enginev1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: withdrawalCredentials,
Amount: amount,
Signature: sig,
Index: index,
}, nil
}
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
if src == nil {
return nil, errNilValue
@@ -930,6 +1158,138 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
}
}
func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
if src == nil {
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 2)
if err != nil {
return nil, err
}
attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
for i, s := range src {
if s == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
}
if s.Attestation1 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
}
if s.Attestation2 == nil {
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
}
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
}
a1AttestingIndices := make([]uint64, len(s.Attestation1.AttestingIndices))
for j, ix := range s.Attestation1.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices[%d]", i, j))
}
a1AttestingIndices[j] = attestingIndex
}
a1Data, err := s.Attestation1.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
}
a2Sig, err := bytesutil.DecodeHexWithLength(s.Attestation2.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
}
a2AttestingIndices := make([]uint64, len(s.Attestation2.AttestingIndices))
for j, ix := range s.Attestation2.AttestingIndices {
attestingIndex, err := strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices[%d]", i, j))
}
a2AttestingIndices[j] = attestingIndex
}
a2Data, err := s.Attestation2.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Data", i))
}
attesterSlashings[i] = &eth.AttesterSlashingElectra{
Attestation_1: &eth.IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: a1Data,
Signature: a1Sig,
},
Attestation_2: &eth.IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: a2Data,
Signature: a2Sig,
},
}
}
return attesterSlashings, nil
}
func AttesterSlashingsElectraFromConsensus(src []*eth.AttesterSlashingElectra) []*AttesterSlashingElectra {
attesterSlashings := make([]*AttesterSlashingElectra, len(src))
for i, s := range src {
attesterSlashings[i] = AttesterSlashingElectraFromConsensus(s)
}
return attesterSlashings
}
func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *AttesterSlashingElectra {
a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices))
for j, ix := range src.Attestation_1.AttestingIndices {
a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices))
for j, ix := range src.Attestation_2.AttestingIndices {
a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
return &AttesterSlashingElectra{
Attestation1: &IndexedAttestationElectra{
AttestingIndices: a1AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_1.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_1.Signature),
},
Attestation2: &IndexedAttestationElectra{
AttestingIndices: a2AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_2.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_2.Signature),
},
}
}
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
if src == nil {
return nil, errNilValue
@@ -957,6 +1317,33 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
return atts
}
func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
if src == nil {
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 8)
if err != nil {
return nil, err
}
atts := make([]*eth.AttestationElectra, len(src))
for i, a := range src {
atts[i], err = a.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
}
}
return atts, nil
}
func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElectra {
atts := make([]*AttestationElectra, len(src))
for i, a := range src {
atts[i] = AttElectraFromConsensus(a)
}
return atts
}
func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
if src == nil {
return nil, errNilValue
@@ -1087,3 +1474,37 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
ExecutionBlockHeight: fmt.Sprintf("%d", ds.ExecutionDepth),
}
}
func PendingBalanceDepositsFromConsensus(ds []*eth.PendingBalanceDeposit) []*PendingBalanceDeposit {
deposits := make([]*PendingBalanceDeposit, len(ds))
for i, d := range ds {
deposits[i] = &PendingBalanceDeposit{
Index: fmt.Sprintf("%d", d.Index),
Amount: fmt.Sprintf("%d", d.Amount),
}
}
return deposits
}
func PendingPartialWithdrawalsFromConsensus(ws []*eth.PendingPartialWithdrawal) []*PendingPartialWithdrawal {
withdrawals := make([]*PendingPartialWithdrawal, len(ws))
for i, w := range ws {
withdrawals[i] = &PendingPartialWithdrawal{
Index: fmt.Sprintf("%d", w.Index),
Amount: fmt.Sprintf("%d", w.Amount),
WithdrawableEpoch: fmt.Sprintf("%d", w.WithdrawableEpoch),
}
}
return withdrawals
}
func PendingConsolidationsFromConsensus(cs []*eth.PendingConsolidation) []*PendingConsolidation {
consolidations := make([]*PendingConsolidation, len(cs))
for i, c := range cs {
consolidations[i] = &PendingConsolidation{
SourceIndex: fmt.Sprintf("%d", c.SourceIndex),
TargetIndex: fmt.Sprintf("%d", c.TargetIndex),
}
}
return consolidations
}

File diff suppressed because it is too large


@@ -0,0 +1,122 @@
package structs
import (
"encoding/json"
"fmt"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
v2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v5/proto/migration"
)
func LightClientUpdateFromConsensus(update *v2.LightClientUpdate) (*LightClientUpdate, error) {
attestedHeader, err := lightClientHeaderContainerToJSON(update.AttestedHeader)
if err != nil {
return nil, errors.Wrap(err, "could not marshal attested light client header")
}
finalizedHeader, err := lightClientHeaderContainerToJSON(update.FinalizedHeader)
if err != nil {
return nil, errors.Wrap(err, "could not marshal finalized light client header")
}
return &LightClientUpdate{
AttestedHeader: attestedHeader,
NextSyncCommittee: SyncCommitteeFromConsensus(migration.V2SyncCommitteeToV1Alpha1(update.NextSyncCommittee)),
NextSyncCommitteeBranch: branchToJSON(update.NextSyncCommitteeBranch),
FinalizedHeader: finalizedHeader,
FinalityBranch: branchToJSON(update.FinalityBranch),
SyncAggregate: syncAggregateToJSON(update.SyncAggregate),
SignatureSlot: strconv.FormatUint(uint64(update.SignatureSlot), 10),
}, nil
}
func LightClientFinalityUpdateFromConsensus(update *v2.LightClientFinalityUpdate) (*LightClientFinalityUpdate, error) {
attestedHeader, err := lightClientHeaderContainerToJSON(update.AttestedHeader)
if err != nil {
return nil, errors.Wrap(err, "could not marshal attested light client header")
}
finalizedHeader, err := lightClientHeaderContainerToJSON(update.FinalizedHeader)
if err != nil {
return nil, errors.Wrap(err, "could not marshal finalized light client header")
}
return &LightClientFinalityUpdate{
AttestedHeader: attestedHeader,
FinalizedHeader: finalizedHeader,
FinalityBranch: branchToJSON(update.FinalityBranch),
SyncAggregate: syncAggregateToJSON(update.SyncAggregate),
SignatureSlot: strconv.FormatUint(uint64(update.SignatureSlot), 10),
}, nil
}
func LightClientOptimisticUpdateFromConsensus(update *v2.LightClientOptimisticUpdate) (*LightClientOptimisticUpdate, error) {
attestedHeader, err := lightClientHeaderContainerToJSON(update.AttestedHeader)
if err != nil {
return nil, errors.Wrap(err, "could not marshal attested light client header")
}
return &LightClientOptimisticUpdate{
AttestedHeader: attestedHeader,
SyncAggregate: syncAggregateToJSON(update.SyncAggregate),
SignatureSlot: strconv.FormatUint(uint64(update.SignatureSlot), 10),
}, nil
}
func branchToJSON(branchBytes [][]byte) []string {
if branchBytes == nil {
return nil
}
branch := make([]string, len(branchBytes))
for i, root := range branchBytes {
branch[i] = hexutil.Encode(root)
}
return branch
}
func syncAggregateToJSON(input *v1.SyncAggregate) *SyncAggregate {
return &SyncAggregate{
SyncCommitteeBits: hexutil.Encode(input.SyncCommitteeBits),
SyncCommitteeSignature: hexutil.Encode(input.SyncCommitteeSignature),
}
}
func lightClientHeaderContainerToJSON(container *v2.LightClientHeaderContainer) (json.RawMessage, error) {
beacon, err := container.GetBeacon()
if err != nil {
return nil, errors.Wrap(err, "could not get beacon block header")
}
var header any
switch t := (container.Header).(type) {
case *v2.LightClientHeaderContainer_HeaderAltair:
header = &LightClientHeader{Beacon: BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(beacon))}
case *v2.LightClientHeaderContainer_HeaderCapella:
execution, err := ExecutionPayloadHeaderCapellaFromConsensus(t.HeaderCapella.Execution)
if err != nil {
return nil, err
}
header = &LightClientHeaderCapella{
Beacon: BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(beacon)),
Execution: execution,
ExecutionBranch: branchToJSON(t.HeaderCapella.ExecutionBranch),
}
case *v2.LightClientHeaderContainer_HeaderDeneb:
execution, err := ExecutionPayloadHeaderDenebFromConsensus(t.HeaderDeneb.Execution)
if err != nil {
return nil, err
}
header = &LightClientHeaderDeneb{
Beacon: BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(beacon)),
Execution: execution,
ExecutionBranch: branchToJSON(t.HeaderDeneb.ExecutionBranch),
}
default:
return nil, fmt.Errorf("unsupported header type %T", t)
}
return json.Marshal(header)
}


@@ -593,3 +593,185 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
HistoricalSummaries: hs,
}, nil
}
func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr, err := st.HistoricalRoots()
if err != nil {
return nil, err
}
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
execData, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, errPayloadHeaderNotFound
}
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingBalanceDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
return &BeaconStateElectra{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
LatestExecutionPayloadHeader: payload,
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingBalanceDeposits: PendingBalanceDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
}


@@ -225,3 +225,19 @@ type IndividualVote struct {
InclusionDistance string `json:"inclusion_distance"`
InactivityScore string `json:"inactivity_score"`
}
type ChainHead struct {
HeadSlot string `json:"head_slot"`
HeadEpoch string `json:"head_epoch"`
HeadBlockRoot string `json:"head_block_root"`
FinalizedSlot string `json:"finalized_slot"`
FinalizedEpoch string `json:"finalized_epoch"`
FinalizedBlockRoot string `json:"finalized_block_root"`
JustifiedSlot string `json:"justified_slot"`
JustifiedEpoch string `json:"justified_epoch"`
JustifiedBlockRoot string `json:"justified_block_root"`
PreviousJustifiedSlot string `json:"previous_justified_slot"`
PreviousJustifiedEpoch string `json:"previous_justified_epoch"`
PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
OptimisticStatus bool `json:"optimistic_status"`
}


@@ -96,21 +96,7 @@ type LightClientFinalityUpdateEvent struct {
Data *LightClientFinalityUpdate `json:"data"`
}
type LightClientFinalityUpdate struct {
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
FinalizedHeader *BeaconBlockHeader `json:"finalized_header"`
FinalityBranch []string `json:"finality_branch"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientOptimisticUpdateEvent struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}
type LightClientOptimisticUpdate struct {
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}


@@ -1,31 +1,73 @@
package structs
import "encoding/json"
type LightClientHeader struct {
Beacon *BeaconBlockHeader `json:"beacon"`
}
type LightClientHeaderCapella struct {
Beacon *BeaconBlockHeader `json:"beacon"`
Execution *ExecutionPayloadHeaderCapella `json:"execution"`
ExecutionBranch []string `json:"execution_branch"`
}
type LightClientHeaderDeneb struct {
Beacon *BeaconBlockHeader `json:"beacon"`
Execution *ExecutionPayloadHeaderDeneb `json:"execution"`
ExecutionBranch []string `json:"execution_branch"`
}
type LightClientBootstrap struct {
Header json.RawMessage `json:"header"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
}
type LightClientUpdate struct {
AttestedHeader json.RawMessage `json:"attested_header"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee,omitempty"`
FinalizedHeader json.RawMessage `json:"finalized_header,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
FinalityBranch []string `json:"finality_branch,omitempty"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientFinalityUpdate struct {
AttestedHeader json.RawMessage `json:"attested_header"`
FinalizedHeader json.RawMessage `json:"finalized_header"`
FinalityBranch []string `json:"finality_branch"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientOptimisticUpdate struct {
AttestedHeader json.RawMessage `json:"attested_header"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientBootstrapResponse struct {
Version string `json:"version"`
Data *LightClientBootstrap `json:"data"`
}
type LightClientBootstrap struct {
Header *BeaconBlockHeader `json:"header"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
}
type LightClientUpdate struct {
AttestedHeader *BeaconBlockHeader `json:"attested_header"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee,omitempty"`
FinalizedHeader *BeaconBlockHeader `json:"finalized_header,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"`
FinalityBranch []string `json:"finality_branch,omitempty"`
SignatureSlot string `json:"signature_slot"`
}
type LightClientUpdateWithVersion struct {
type LightClientUpdateResponse struct {
Version string `json:"version"`
Data *LightClientUpdate `json:"data"`
}
type LightClientUpdatesByRangeResponse struct {
Updates []*LightClientUpdateWithVersion `json:"updates"`
type LightClientFinalityUpdateResponse struct {
Version string `json:"version"`
Data *LightClientFinalityUpdate `json:"data"`
}
type LightClientOptimisticUpdateResponse struct {
Version string `json:"version"`
Data *LightClientOptimisticUpdate `json:"data"`
}
type LightClientUpdatesByRangeResponse struct {
Updates []*LightClientUpdateResponse `json:"updates"`
}


@@ -118,3 +118,34 @@ type GetValidatorPerformanceResponse struct {
MissingValidators [][]byte `json:"missing_validators,omitempty"`
InactivityScores []uint64 `json:"inactivity_scores,omitempty"`
}
type GetValidatorParticipationResponse struct {
Epoch string `json:"epoch"`
Finalized bool `json:"finalized"`
Participation *ValidatorParticipation `json:"participation"`
}
type ValidatorParticipation struct {
GlobalParticipationRate string `json:"global_participation_rate" deprecated:"true"`
VotedEther string `json:"voted_ether" deprecated:"true"`
EligibleEther string `json:"eligible_ether" deprecated:"true"`
CurrentEpochActiveGwei string `json:"current_epoch_active_gwei"`
CurrentEpochAttestingGwei string `json:"current_epoch_attesting_gwei"`
CurrentEpochTargetAttestingGwei string `json:"current_epoch_target_attesting_gwei"`
PreviousEpochActiveGwei string `json:"previous_epoch_active_gwei"`
PreviousEpochAttestingGwei string `json:"previous_epoch_attesting_gwei"`
PreviousEpochTargetAttestingGwei string `json:"previous_epoch_target_attesting_gwei"`
PreviousEpochHeadAttestingGwei string `json:"previous_epoch_head_attesting_gwei"`
}
type ActiveSetChanges struct {
Epoch string `json:"epoch"`
ActivatedPublicKeys []string `json:"activated_public_keys"`
ActivatedIndices []string `json:"activated_indices"`
ExitedPublicKeys []string `json:"exited_public_keys"`
ExitedIndices []string `json:"exited_indices"`
SlashedPublicKeys []string `json:"slashed_public_keys"`
SlashedIndices []string `json:"slashed_indices"`
EjectedPublicKeys []string `json:"ejected_public_keys"`
EjectedIndices []string `json:"ejected_indices"`
}


@@ -29,6 +29,13 @@ type Attestation struct {
Signature string `json:"signature"`
}
type AttestationElectra struct {
AggregationBits string `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
CommitteeBits string `json:"committee_bits"`
}
type AttestationData struct {
Slot string `json:"slot"`
CommitteeIndex string `json:"index"`
@@ -78,6 +85,17 @@ type AggregateAttestationAndProof struct {
SelectionProof string `json:"selection_proof"`
}
type SignedAggregateAttestationAndProofElectra struct {
Message *AggregateAttestationAndProofElectra `json:"message"`
Signature string `json:"signature"`
}
type AggregateAttestationAndProofElectra struct {
AggregatorIndex string `json:"aggregator_index"`
Aggregate *AttestationElectra `json:"aggregate"`
SelectionProof string `json:"selection_proof"`
}
type SyncCommitteeSubscription struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
@@ -178,6 +196,11 @@ type AttesterSlashing struct {
Attestation2 *IndexedAttestation `json:"attestation_2"`
}
type AttesterSlashingElectra struct {
Attestation1 *IndexedAttestationElectra `json:"attestation_1"`
Attestation2 *IndexedAttestationElectra `json:"attestation_2"`
}
type Deposit struct {
Proof []string `json:"proof"`
Data *DepositData `json:"data"`
@@ -196,6 +219,12 @@ type IndexedAttestation struct {
Signature string `json:"signature"`
}
type IndexedAttestationElectra struct {
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type SyncAggregate struct {
SyncCommitteeBits string `json:"sync_committee_bits"`
SyncCommitteeSignature string `json:"sync_committee_signature"`
@@ -207,3 +236,39 @@ type Withdrawal struct {
ExecutionAddress string `json:"address"`
Amount string `json:"amount"`
}
type DepositRequest struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Index string `json:"index"`
}
type WithdrawalRequest struct {
SourceAddress string `json:"source_address"`
ValidatorPubkey string `json:"validator_pubkey"`
Amount string `json:"amount"`
}
type ConsolidationRequest struct {
SourceAddress string `json:"source_address"`
SourcePubkey string `json:"source_pubkey"`
TargetPubkey string `json:"target_pubkey"`
}
type PendingBalanceDeposit struct {
Index string `json:"index"`
Amount string `json:"amount"`
}
type PendingPartialWithdrawal struct {
Index string `json:"index"`
Amount string `json:"amount"`
WithdrawableEpoch string `json:"withdrawable_epoch"`
}
type PendingConsolidation struct {
SourceIndex string `json:"source_index"`
TargetIndex string `json:"target_index"`
}


@@ -140,3 +140,43 @@ type BeaconStateDeneb struct {
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
}
type BeaconStateElectra struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingBalanceDeposits []*PendingBalanceDeposit `json:"pending_balance_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
}

View File

@@ -8,22 +8,20 @@ go_library(
],
importpath = "github.com/prysmaticlabs/prysm/v5/async/event",
visibility = ["//visibility:public"],
deps = ["//time/mclock:go_default_library"],
deps = [
"//time/mclock:go_default_library",
"@com_github_ethereum_go_ethereum//event:go_default_library",
],
)
go_test(
name = "go_default_test",
size = "small",
srcs = [
"example_feed_test.go",
"example_scope_test.go",
"example_subscription_test.go",
"feed_test.go",
"subscription_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -1,73 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package event_test
import (
"fmt"
"github.com/prysmaticlabs/prysm/v5/async/event"
)
func ExampleFeed_acknowledgedEvents() {
// This example shows how the return value of Send can be used for request/reply
// interaction between event consumers and producers.
var feed event.Feed
type ackedEvent struct {
i int
ack chan<- struct{}
}
// Consumers wait for events on the feed and acknowledge processing.
done := make(chan struct{})
defer close(done)
for i := 0; i < 3; i++ {
ch := make(chan ackedEvent, 100)
sub := feed.Subscribe(ch)
go func() {
defer sub.Unsubscribe()
for {
select {
case ev := <-ch:
fmt.Println(ev.i) // "process" the event
ev.ack <- struct{}{}
case <-done:
return
}
}
}()
}
// The producer sends values of type ackedEvent with increasing values of i.
// It waits for all consumers to acknowledge before sending the next event.
for i := 0; i < 3; i++ {
acksignal := make(chan struct{})
n := feed.Send(ackedEvent{i, acksignal})
for ack := 0; ack < n; ack++ {
<-acksignal
}
}
// Output:
// 0
// 0
// 0
// 1
// 1
// 1
// 2
// 2
// 2
}

View File

@@ -14,241 +14,11 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package event contains an event feed implementation for process communication.
package event
import (
"errors"
"reflect"
"slices"
"sync"
geth_event "github.com/ethereum/go-ethereum/event"
)
var errBadChannel = errors.New("event: Subscribe argument does not have sendable channel type")
// Feed implements one-to-many subscriptions where the carrier of events is a channel.
// Values sent to a Feed are delivered to all subscribed channels simultaneously.
//
// Feeds can only be used with a single type. The type is determined by the first Send or
// Subscribe operation. Subsequent calls to these methods panic if the type does not
// match.
//
// The zero value is ready to use.
type Feed struct {
once sync.Once // ensures that init only runs once
sendLock chan struct{} // sendLock has a one-element buffer and is empty when held. It protects sendCases.
removeSub chan interface{} // interrupts Send
sendCases caseList // the active set of select cases used by Send
// The inbox holds newly subscribed channels until they are added to sendCases.
mu sync.Mutex
inbox caseList
etype reflect.Type
}
// This is the index of the first actual subscription channel in sendCases.
// sendCases[0] is a SelectRecv case for the removeSub channel.
const firstSubSendCase = 1
type feedTypeError struct {
got, want reflect.Type
op string
}
func (e feedTypeError) Error() string {
return "event: wrong type in " + e.op + " got " + e.got.String() + ", want " + e.want.String()
}
func (f *Feed) init() {
f.removeSub = make(chan interface{})
f.sendLock = make(chan struct{}, 1)
f.sendLock <- struct{}{}
f.sendCases = caseList{{Chan: reflect.ValueOf(f.removeSub), Dir: reflect.SelectRecv}}
}
// Subscribe adds a channel to the feed. Future sends will be delivered on the channel
// until the subscription is canceled. All channels added must have the same element type.
//
// The channel should have ample buffer space to avoid blocking other subscribers.
// Slow subscribers are not dropped.
func (f *Feed) Subscribe(channel interface{}) Subscription {
f.once.Do(f.init)
chanval := reflect.ValueOf(channel)
chantyp := chanval.Type()
if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.SendDir == 0 {
panic(errBadChannel)
}
sub := &feedSub{feed: f, channel: chanval, err: make(chan error, 1)}
f.mu.Lock()
defer f.mu.Unlock()
if !f.typecheck(chantyp.Elem()) {
panic(feedTypeError{op: "Subscribe", got: chantyp, want: reflect.ChanOf(reflect.SendDir, f.etype)})
}
// Add the select case to the inbox.
// The next Send will add it to f.sendCases.
cas := reflect.SelectCase{Dir: reflect.SelectSend, Chan: chanval}
f.inbox = append(f.inbox, cas)
return sub
}
// note: callers must hold f.mu
func (f *Feed) typecheck(typ reflect.Type) bool {
if f.etype == nil {
f.etype = typ
return true
}
// In the event the feed's type is an actual interface, we
// perform an interface conformance check here.
if f.etype.Kind() == reflect.Interface && typ.Implements(f.etype) {
return true
}
return f.etype == typ
}
func (f *Feed) remove(sub *feedSub) {
// Delete from inbox first, which covers channels
// that have not been added to f.sendCases yet.
ch := sub.channel.Interface()
f.mu.Lock()
index := f.inbox.find(ch)
if index != -1 {
f.inbox = f.inbox.delete(index)
f.mu.Unlock()
return
}
f.mu.Unlock()
select {
case f.removeSub <- ch:
// Send will remove the channel from f.sendCases.
case <-f.sendLock:
// No Send is in progress, delete the channel now that we have the send lock.
f.sendCases = f.sendCases.delete(f.sendCases.find(ch))
f.sendLock <- struct{}{}
}
}
// Send delivers to all subscribed channels simultaneously.
// It returns the number of subscribers that the value was sent to.
func (f *Feed) Send(value interface{}) (nsent int) {
rvalue := reflect.ValueOf(value)
f.once.Do(f.init)
<-f.sendLock
// Add new cases from the inbox after taking the send lock.
f.mu.Lock()
f.sendCases = append(f.sendCases, f.inbox...)
f.inbox = nil
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()
// Set the sent value on all channels.
for i := firstSubSendCase; i < len(f.sendCases); i++ {
f.sendCases[i].Send = rvalue
}
// Send until all channels except removeSub have been chosen. 'cases' tracks a prefix
// of sendCases. When a send succeeds, the corresponding case moves to the end of
// 'cases' and it shrinks by one element.
cases := f.sendCases
for {
// Fast path: try sending without blocking before adding to the select set.
// This should usually succeed if subscribers are fast enough and have free
// buffer space.
for i := firstSubSendCase; i < len(cases); i++ {
if cases[i].Chan.TrySend(rvalue) {
nsent++
cases = cases.deactivate(i)
i--
}
}
if len(cases) == firstSubSendCase {
break
}
// Select on all the receivers, waiting for them to unblock.
chosen, recv, _ := reflect.Select(cases)
if chosen == 0 /* <-f.removeSub */ {
index := f.sendCases.find(recv.Interface())
f.sendCases = f.sendCases.delete(index)
if index >= 0 && index < len(cases) {
// Shrink 'cases' too because the removed case was still active.
cases = f.sendCases[:len(cases)-1]
}
} else {
cases = cases.deactivate(chosen)
nsent++
}
}
// Forget about the sent value and hand off the send lock.
for i := firstSubSendCase; i < len(f.sendCases); i++ {
f.sendCases[i].Send = reflect.Value{}
}
f.sendLock <- struct{}{}
return nsent
}
type feedSub struct {
feed *Feed
channel reflect.Value
errOnce sync.Once
err chan error
}
// Unsubscribe removes the feed subscription.
func (sub *feedSub) Unsubscribe() {
sub.errOnce.Do(func() {
sub.feed.remove(sub)
close(sub.err)
})
}
// Err returns the error channel.
func (sub *feedSub) Err() <-chan error {
return sub.err
}
type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
}
// delete removes the given case from cs.
func (cs caseList) delete(index int) caseList {
return append(cs[:index], cs[index+1:]...)
}
// deactivate moves the case at index into the non-accessible portion of the cs slice.
func (cs caseList) deactivate(index int) caseList {
last := len(cs) - 1
cs[index], cs[last] = cs[last], cs[index]
return cs[:last]
}
// func (cs caseList) String() string {
// s := "["
// for i, cas := range cs {
// if i != 0 {
// s += ", "
// }
// switch cas.Dir {
// case reflect.SelectSend:
// s += fmt.Sprintf("%v<-", cas.Chan.Interface())
// case reflect.SelectRecv:
// s += fmt.Sprintf("<-%v", cas.Chan.Interface())
// }
// }
// return s + "]"
// }
// Feed is a re-export of the go-ethereum event feed.
type Feed = geth_event.Feed
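Because Feed is now a type alias for go-ethereum's event.Feed, existing call sites keep compiling unchanged. A minimal usage sketch through the alias (the event type and channel size here are illustrative):

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/async/event"
)

type blockEvent struct{ slot uint64 }

func main() {
    // The zero value is ready to use; the alias resolves to go-ethereum's event.Feed.
    var feed event.Feed

    ch := make(chan blockEvent, 1) // buffered so Send does not block on this subscriber
    sub := feed.Subscribe(ch)
    defer sub.Unsubscribe()

    nsent := feed.Send(blockEvent{slot: 42}) // delivered to every subscribed channel
    fmt.Println(nsent, (<-ch).slot)          // 1 42
}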

View File

@@ -1,509 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package event
import (
"fmt"
"reflect"
"sync"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
)
func TestFeedPanics(t *testing.T) {
{
var f Feed
f.Send(2)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
// Validate it doesn't deadlock.
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed
ch := make(chan int)
f.Subscribe(ch)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed
f.Send(2)
want := feedTypeError{op: "Subscribe", got: reflect.TypeOf(make(chan uint64)), want: reflect.TypeOf(make(chan<- int))}
assert.NoError(t, checkPanic(want, func() { f.Subscribe(make(chan uint64)) }))
}
{
var f Feed
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(make(<-chan int)) }))
}
{
var f Feed
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(0) }))
}
}
func checkPanic(want error, fn func()) (err error) {
defer func() {
panicResult := recover()
if panicResult == nil {
err = fmt.Errorf("didn't panic")
} else if !reflect.DeepEqual(panicResult, want) {
err = fmt.Errorf("panicked with wrong error: got %q, want %q", panicResult, want)
}
}()
fn()
return nil
}
func TestFeed(t *testing.T) {
var feed Feed
var done, subscribed sync.WaitGroup
subscriber := func(i int) {
defer done.Done()
subchan := make(chan int)
sub := feed.Subscribe(subchan)
timeout := time.NewTimer(2 * time.Second)
subscribed.Done()
select {
case v := <-subchan:
if v != 1 {
t.Errorf("%d: received value %d, want 1", i, v)
}
case <-timeout.C:
t.Errorf("%d: receive timeout", i)
}
sub.Unsubscribe()
select {
case _, ok := <-sub.Err():
if ok {
t.Errorf("%d: error channel not closed after unsubscribe", i)
}
case <-timeout.C:
t.Errorf("%d: unsubscribe timeout", i)
}
}
const n = 1000
done.Add(n)
subscribed.Add(n)
for i := 0; i < n; i++ {
go subscriber(i)
}
subscribed.Wait()
if nsent := feed.Send(1); nsent != n {
t.Errorf("first send delivered %d times, want %d", nsent, n)
}
if nsent := feed.Send(2); nsent != 0 {
t.Errorf("second send delivered %d times, want 0", nsent)
}
done.Wait()
}
func TestFeedSubscribeSameChannel(t *testing.T) {
var (
feed Feed
done sync.WaitGroup
ch = make(chan int)
sub1 = feed.Subscribe(ch)
sub2 = feed.Subscribe(ch)
_ = feed.Subscribe(ch)
)
expectSends := func(value, n int) {
if nsent := feed.Send(value); nsent != n {
t.Errorf("send delivered %d times, want %d", nsent, n)
}
done.Done()
}
expectRecv := func(wantValue, n int) {
for i := 0; i < n; i++ {
if v := <-ch; v != wantValue {
t.Errorf("received %d, want %d", v, wantValue)
}
}
}
done.Add(1)
go expectSends(1, 3)
expectRecv(1, 3)
done.Wait()
sub1.Unsubscribe()
done.Add(1)
go expectSends(2, 2)
expectRecv(2, 2)
done.Wait()
sub2.Unsubscribe()
done.Add(1)
go expectSends(3, 1)
expectRecv(3, 1)
done.Wait()
}
func TestFeedSubscribeBlockedPost(_ *testing.T) {
var (
feed Feed
nsends = 2000
ch1 = make(chan int)
ch2 = make(chan int)
wg sync.WaitGroup
)
defer wg.Wait()
feed.Subscribe(ch1)
wg.Add(nsends)
for i := 0; i < nsends; i++ {
go func() {
feed.Send(99)
wg.Done()
}()
}
sub2 := feed.Subscribe(ch2)
defer sub2.Unsubscribe()
// We're done when ch1 has received N times.
// The number of receives on ch2 depends on scheduling.
for i := 0; i < nsends; {
select {
case <-ch1:
i++
case <-ch2:
}
}
}
func TestFeedUnsubscribeBlockedPost(_ *testing.T) {
var (
feed Feed
nsends = 200
chans = make([]chan int, 2000)
subs = make([]Subscription, len(chans))
bchan = make(chan int)
bsub = feed.Subscribe(bchan)
wg sync.WaitGroup
)
for i := range chans {
chans[i] = make(chan int, nsends)
}
// Queue up some Sends. None of these can make progress while bchan isn't read.
wg.Add(nsends)
for i := 0; i < nsends; i++ {
go func() {
feed.Send(99)
wg.Done()
}()
}
// Subscribe the other channels.
for i, ch := range chans {
subs[i] = feed.Subscribe(ch)
}
// Unsubscribe them again.
for _, sub := range subs {
sub.Unsubscribe()
}
// Unblock the Sends.
bsub.Unsubscribe()
wg.Wait()
}
// Checks that unsubscribing a channel during Send works even if that
// channel has already been sent on.
func TestFeedUnsubscribeSentChan(_ *testing.T) {
var (
feed Feed
ch1 = make(chan int)
ch2 = make(chan int)
sub1 = feed.Subscribe(ch1)
sub2 = feed.Subscribe(ch2)
wg sync.WaitGroup
)
defer sub2.Unsubscribe()
wg.Add(1)
go func() {
feed.Send(0)
wg.Done()
}()
// Wait for the value on ch1.
<-ch1
// Unsubscribe ch1, removing it from the send cases.
sub1.Unsubscribe()
// Receive ch2, finishing Send.
<-ch2
wg.Wait()
// Send again. This should send to ch2 only, so the wait group will unblock
// as soon as a value is received on ch2.
wg.Add(1)
go func() {
feed.Send(0)
wg.Done()
}()
<-ch2
wg.Wait()
}
func TestFeedUnsubscribeFromInbox(t *testing.T) {
var (
feed Feed
ch1 = make(chan int)
ch2 = make(chan int)
sub1 = feed.Subscribe(ch1)
sub2 = feed.Subscribe(ch1)
sub3 = feed.Subscribe(ch2)
)
assert.Equal(t, 3, len(feed.inbox))
assert.Equal(t, 1, len(feed.sendCases), "sendCases is non-empty after unsubscribe")
sub1.Unsubscribe()
sub2.Unsubscribe()
sub3.Unsubscribe()
assert.Equal(t, 0, len(feed.inbox), "Inbox is non-empty after unsubscribe")
assert.Equal(t, 1, len(feed.sendCases), "sendCases is non-empty after unsubscribe")
}
func BenchmarkFeedSend1000(b *testing.B) {
var (
done sync.WaitGroup
feed Feed
nsubs = 1000
)
subscriber := func(ch <-chan int) {
for i := 0; i < b.N; i++ {
<-ch
}
done.Done()
}
done.Add(nsubs)
for i := 0; i < nsubs; i++ {
ch := make(chan int, 200)
feed.Subscribe(ch)
go subscriber(ch)
}
// The actual benchmark.
b.ResetTimer()
for i := 0; i < b.N; i++ {
if feed.Send(i) != nsubs {
panic("wrong number of sends")
}
}
b.StopTimer()
done.Wait()
}
func TestFeed_Send(t *testing.T) {
tests := []struct {
name string
evFeed *Feed
testSetup func(fd *Feed, t *testing.T, o interface{})
obj interface{}
expectPanic bool
}{
{
name: "normal struct",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedWithPointer, 1)
fd.Subscribe(testChan)
},
obj: testFeedWithPointer{
a: new(uint64),
b: new(string),
},
expectPanic: false,
},
{
name: "un-implemented interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface, 1)
fd.Subscribe(testChan)
},
obj: testFeedWithPointer{
a: new(uint64),
b: new(string),
},
expectPanic: true,
},
{
name: "semi-implemented interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface, 1)
fd.Subscribe(testChan)
},
obj: testFeed2{
a: 0,
b: "",
c: []byte{'A'},
},
expectPanic: true,
},
{
name: "fully-implemented interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface)
// Make it unbuffered to allow message to
// pass through
go func() {
a := <-testChan
if !reflect.DeepEqual(a, o) {
t.Errorf("Got = %v, want = %v", a, o)
}
}()
fd.Subscribe(testChan)
},
obj: testFeed{
a: 0,
b: "",
},
expectPanic: false,
},
{
name: "fully-implemented interface with additional methods",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeedIface)
// Make it unbuffered to allow message to
// pass through
go func() {
a := <-testChan
if !reflect.DeepEqual(a, o) {
t.Errorf("Got = %v, want = %v", a, o)
}
}()
fd.Subscribe(testChan)
},
obj: testFeed3{
a: 0,
b: "",
c: []byte{'A'},
d: []byte{'B'},
},
expectPanic: false,
},
{
name: "concrete types implementing the same interface",
evFeed: new(Feed),
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
testChan := make(chan testFeed, 1)
// Make it unbuffered to allow message to
// pass through
go func() {
a := <-testChan
if !reflect.DeepEqual(a, o) {
t.Errorf("Got = %v, want = %v", a, o)
}
}()
fd.Subscribe(testChan)
},
obj: testFeed3{
a: 0,
b: "",
c: []byte{'A'},
d: []byte{'B'},
},
expectPanic: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer func() {
if r := recover(); r != nil {
if !tt.expectPanic {
t.Errorf("panic triggered when unexpected: %v", r)
}
} else {
if tt.expectPanic {
t.Error("panic not triggered when expected")
}
}
}()
tt.testSetup(tt.evFeed, t, tt.obj)
if gotNsent := tt.evFeed.Send(tt.obj); gotNsent != 1 {
t.Errorf("Send() = %v, want %v", gotNsent, 1)
}
})
}
}
// The following objects are a collection of different struct types to test with.
type testFeed struct {
a uint64
b string
}
func (testFeed) method1() {
}
func (testFeed) method2() {
}
type testFeedWithPointer struct {
a *uint64
b *string
}
type testFeed2 struct {
a uint64
b string
c []byte
}
func (testFeed2) method1() {
}
type testFeed3 struct {
a uint64
b string
c, d []byte
}
func (testFeed3) method1() {
}
func (testFeed3) method2() {
}
func (testFeed3) method3() {
}
type testFeedIface interface {
method1()
method2()
}

View File

@@ -6,6 +6,7 @@ go_library(
"chain_info.go",
"chain_info_forkchoice.go",
"currently_syncing_block.go",
"currently_syncing_execution_payload_envelope.go",
"defragment.go",
"error.go",
"execution_engine.go",
@@ -13,7 +14,6 @@ go_library(
"head.go",
"head_sync_committee_info.go",
"init_sync_process_block.go",
"lightclient.go",
"log.go",
"merge_ascii_art.go",
"metrics.go",
@@ -26,6 +26,8 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_execution_payload_envelope.go",
"receive_payload_attestation_message.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
@@ -44,10 +46,12 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epbs:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/light-client:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -81,10 +85,10 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
@@ -110,6 +114,7 @@ go_test(
"chain_info_norace_test.go",
"chain_info_test.go",
"checktags_test.go",
"epbs_test.go",
"error_test.go",
"execution_engine_test.go",
"forkchoice_update_execution_test.go",
@@ -117,7 +122,6 @@ go_test(
"head_test.go",
"init_sync_process_block_test.go",
"init_test.go",
"lightclient_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
@@ -126,6 +130,7 @@ go_test(
"process_block_test.go",
"receive_attestation_test.go",
"receive_block_test.go",
"receive_execution_payload_envelope_test.go",
"service_norace_test.go",
"service_test.go",
"setup_test.go",
@@ -174,12 +179,12 @@ go_test(
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//testing/util/random:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",

View File

@@ -16,9 +16,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
// ChainInfoFetcher defines a common interface for methods in blockchain service which
@@ -42,7 +42,7 @@ type ForkchoiceFetcher interface {
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(uint64)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte)
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
@@ -50,6 +50,7 @@ type ForkchoiceFetcher interface {
ProposerBoost() [32]byte
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
GetPTCVote(root [32]byte) primitives.PTCStatus
}
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -118,6 +119,12 @@ type OptimisticModeFetcher interface {
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}
// ExecutionPayloadFetcher defines a common interface that returns forkchoice
// information about payload block hashes
type ExecutionPayloadFetcher interface {
HashInForkchoice([32]byte) bool
}
// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()
@@ -203,7 +210,7 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
span.SetAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headState(ctx), nil
@@ -225,7 +232,7 @@ func (s *Service) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconSt
defer s.headLock.RUnlock()
ok := s.hasHeadState()
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
span.SetAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headStateReadOnly(ctx), nil
@@ -399,6 +406,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// HashInForkchoice returns true if the given payload block hash is found in
// forkchoice
func (s *Service) HashInForkchoice(hash [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasHash(hash)
}
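The new ExecutionPayloadFetcher interface gives callers a narrow way to ask whether a payload block hash is already tracked by forkchoice, without depending on the whole Service. A hedged sketch of coding against that interface (the in-memory implementation and the shouldRequestPayload helper are illustrative, not Prysm APIs):

package main

import "fmt"

// ExecutionPayloadFetcher mirrors the interface added above: it reports whether
// a payload block hash is known to forkchoice.
type ExecutionPayloadFetcher interface {
    HashInForkchoice([32]byte) bool
}

// memFetcher is a stand-in implementation backed by a set of hashes.
type memFetcher map[[32]byte]bool

func (m memFetcher) HashInForkchoice(h [32]byte) bool { return m[h] }

// shouldRequestPayload is an illustrative consumer: skip fetching a payload the
// node has already imported into forkchoice.
func shouldRequestPayload(f ExecutionPayloadFetcher, blockHash [32]byte) bool {
    return !f.HashInForkchoice(blockHash)
}

func main() {
    known := [32]byte{0x01}
    f := memFetcher{known: true}
    fmt.Println(shouldRequestPayload(f, known))       // false
    fmt.Println(shouldRequestPayload(f, [32]byte{2})) // true
}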
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

View File

@@ -5,6 +5,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -22,13 +23,6 @@ func (s *Service) GetProposerHead() [32]byte {
return s.cfg.ForkChoiceStore.GetProposerHead()
}
// ShouldOverrideFCU returns the corresponding value from forkchoice
func (s *Service) ShouldOverrideFCU() bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
}
// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.Lock()
@@ -36,11 +30,11 @@ func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}
// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
// HighestReceivedBlockSlotRoot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlotRoot()
}
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
@@ -99,3 +93,37 @@ func (s *Service) FinalizedBlockHash() [32]byte {
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}
// ParentRoot wraps a call to the corresponding method in forkchoice
func (s *Service) ParentRoot(root [32]byte) ([32]byte, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ParentRoot(root)
}
// GetPTCVote wraps a call to the corresponding method in forkchoice and checks
// the currently syncing status
// Warning: this method returns the current PTC status regardless of
// timeliness. A client MUST call this method only when it is about to submit a
// PTC attestation, that is, exactly at the attestation submission threshold.
func (s *Service) GetPTCVote(root [32]byte) primitives.PTCStatus {
s.cfg.ForkChoiceStore.RLock()
f := s.cfg.ForkChoiceStore.GetPTCVote()
s.cfg.ForkChoiceStore.RUnlock()
if f != primitives.PAYLOAD_ABSENT {
return f
}
f, isSyncing := s.payloadBeingSynced.isSyncing(root)
if isSyncing {
return f
}
return primitives.PAYLOAD_ABSENT
}
// insertPayloadEnvelope wraps a locked call to the corresponding method in
// forkchoice
func (s *Service) insertPayloadEnvelope(envelope interfaces.ROExecutionPayloadEnvelope) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertPayloadEnvelope(envelope)
}
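GetPTCVote is the read path a payload-timeliness attester uses: it prefers the status recorded in forkchoice and falls back to the syncing cache. A hedged sketch of a consumer that reads the vote once, right at the submission threshold; the ptcVoter interface and buildPTCAttestationData helper are illustrative, not Prysm APIs, although the Service above would satisfy the interface:

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// ptcVoter captures the single method a PTC attester needs from the blockchain service.
type ptcVoter interface {
    GetPTCVote(root [32]byte) primitives.PTCStatus
}

// buildPTCAttestationData reads the vote exactly once, at the point the
// attestation is about to be submitted, as the warning on GetPTCVote requires.
func buildPTCAttestationData(v ptcVoter, root [32]byte) primitives.PTCStatus {
    return v.GetPTCVote(root)
}

type fakeVoter struct{ status primitives.PTCStatus }

func (f fakeVoter) GetPTCVote(_ [32]byte) primitives.PTCStatus { return f.status }

func main() {
    root := [32]byte{'r'}
    fmt.Println(buildPTCAttestationData(fakeVoter{status: primitives.PAYLOAD_PRESENT}, root))
}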

View File

@@ -0,0 +1,36 @@
package blockchain
import (
"sync"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
type currentlySyncingPayload struct {
sync.Mutex
roots map[[32]byte]primitives.PTCStatus
}
func (b *currentlySyncingPayload) set(envelope interfaces.ROExecutionPayloadEnvelope) {
b.Lock()
defer b.Unlock()
if envelope.PayloadWithheld() {
b.roots[envelope.BeaconBlockRoot()] = primitives.PAYLOAD_WITHHELD
} else {
b.roots[envelope.BeaconBlockRoot()] = primitives.PAYLOAD_PRESENT
}
}
func (b *currentlySyncingPayload) unset(root [32]byte) {
b.Lock()
defer b.Unlock()
delete(b.roots, root)
}
func (b *currentlySyncingPayload) isSyncing(root [32]byte) (status primitives.PTCStatus, isSyncing bool) {
b.Lock()
defer b.Unlock()
status, isSyncing = b.roots[root]
return
}
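The intended lifecycle of this cache is to mark a block root while its payload envelope is being verified and imported, and to clear it afterwards so GetPTCVote stops reporting the root as syncing; the roots map must be allocated before first use. A simplified, self-contained sketch of that lifecycle (stand-in types, not the Prysm ones):

package main

import (
    "fmt"
    "sync"
)

// payloadTracker is a stand-in for currentlySyncingPayload above, keyed by
// beacon block root. Statuses are plain strings here to keep the sketch short.
type payloadTracker struct {
    sync.Mutex
    roots map[[32]byte]string
}

func (p *payloadTracker) set(root [32]byte, status string) {
    p.Lock()
    defer p.Unlock()
    p.roots[root] = status
}

func (p *payloadTracker) unset(root [32]byte) {
    p.Lock()
    defer p.Unlock()
    delete(p.roots, root)
}

// processEnvelope shows the intended lifecycle: mark the root before verifying
// and importing the payload, and always clear it afterwards.
func processEnvelope(p *payloadTracker, root [32]byte) {
    p.set(root, "PAYLOAD_PRESENT")
    defer p.unset(root)
    // ... verification and forkchoice insertion would happen here ...
    fmt.Println("processed root starting with byte", root[0])
}

func main() {
    p := &payloadTracker{roots: make(map[[32]byte]string)} // allocate the map before first use
    processEnvelope(p, [32]byte{'r'})
}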

View File

@@ -0,0 +1,18 @@
package blockchain
import (
"testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestServiceGetPTCVote(t *testing.T) {
c := &currentlySyncingPayload{roots: make(map[[32]byte]primitives.PTCStatus)}
s := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, payloadBeingSynced: c}
r := [32]byte{'r'}
require.Equal(t, primitives.PAYLOAD_ABSENT, s.GetPTCVote(r))
c.roots[r] = primitives.PAYLOAD_WITHHELD
require.Equal(t, primitives.PAYLOAD_WITHHELD, s.GetPTCVote(r))
}

View File

@@ -30,6 +30,9 @@ var (
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
// ErrNilHead is returned when no head is present in the blockchain service.
ErrNilHead = errors.New("nil head")
// errInvalidValidatorIndex is returned when a validator index is
// invalid or unexpected
errInvalidValidatorIndex = errors.New("invalid validator index")
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")

View File

@@ -2,7 +2,6 @@ package blockchain
import (
"context"
"crypto/sha256"
"fmt"
"github.com/ethereum/go-ethereum/common"
@@ -21,15 +20,13 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
const blobCommitmentVersionKZG uint8 = 0x01
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -39,7 +36,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
if arg.headBlock.IsNil() {
if arg.headBlock == nil || arg.headBlock.IsNil() {
log.Error("Head block is nil")
return nil, nil
}
@@ -402,13 +399,7 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([
versionedHashes := make([]common.Hash, len(commitments))
for i, commitment := range commitments {
versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
}
return versionedHashes, nil
}
func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
versionedHash := sha256.Sum256(commitment)
versionedHash[0] = blobCommitmentVersionKZG
return versionedHash
}
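The removed helper is now called through the primitives package, but the computation is unchanged: an EIP-4844 versioned hash is the SHA-256 of the KZG commitment with its first byte overwritten by the version tag 0x01. A self-contained sketch of that computation (a standalone reimplementation for illustration, not the Prysm API):

package main

import (
    "crypto/sha256"
    "fmt"
)

const blobCommitmentVersionKZG byte = 0x01

// kzgToVersionedHash reproduces the removed helper: SHA-256 of the KZG
// commitment with the first byte replaced by the KZG version tag, per EIP-4844.
func kzgToVersionedHash(commitment []byte) [32]byte {
    h := sha256.Sum256(commitment)
    h[0] = blobCommitmentVersionKZG
    return h
}

func main() {
    commitment := make([]byte, 48) // a KZG commitment is 48 bytes
    fmt.Printf("%x\n", kzgToVersionedHash(commitment))
}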

View File

@@ -1056,8 +1056,8 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))
require.Equal(t, false, service.hasBlock(ctx, r1))
require.Equal(t, false, service.hasBlock(ctx, r2))
require.Equal(t, false, service.chainHasBlock(ctx, r1))
require.Equal(t, false, service.chainHasBlock(ctx, r2))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
has, err := service.cfg.StateGen.HasState(ctx, r1)

View File

@@ -12,9 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
func (s *Service) isNewHead(r [32]byte) bool {

View File

@@ -18,11 +18,11 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.

View File

@@ -1,245 +0,0 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v5/proto/migration"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
const (
FinalityBranchNumOfLeaves = 6
)
// CreateLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
// def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
//
// return LightClientFinalityUpdate(
// attested_header=update.attested_header,
// finalized_header=update.finalized_header,
// finality_branch=update.finality_branch,
// sync_aggregate=update.sync_aggregate,
// signature_slot=update.signature_slot,
// )
func CreateLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientFinalityUpdate {
return &ethpbv2.LightClientFinalityUpdate{
AttestedHeader: update.AttestedHeader,
FinalizedHeader: update.FinalizedHeader,
FinalityBranch: update.FinalityBranch,
SyncAggregate: update.SyncAggregate,
SignatureSlot: update.SignatureSlot,
}
}
// CreateLightClientOptimisticUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
// def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
//
// return LightClientOptimisticUpdate(
// attested_header=update.attested_header,
// sync_aggregate=update.sync_aggregate,
// signature_slot=update.signature_slot,
// )
func CreateLightClientOptimisticUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientOptimisticUpdate {
return &ethpbv2.LightClientOptimisticUpdate{
AttestedHeader: update.AttestedHeader,
SyncAggregate: update.SyncAggregate,
SignatureSlot: update.SignatureSlot,
}
}
func NewLightClientOptimisticUpdateFromBeaconState(
ctx context.Context,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState) (*ethpbv2.LightClientUpdate, error) {
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
attestedEpoch := slots.ToEpoch(attestedState.Slot())
if attestedEpoch < params.BeaconConfig().AltairForkEpoch {
return nil, fmt.Errorf("invalid attested epoch %d", attestedEpoch)
}
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
syncAggregate, err := block.Block().Body().SyncAggregate()
if err != nil {
return nil, fmt.Errorf("could not get sync aggregate %w", err)
}
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
}
// assert state.slot == state.latest_block_header.slot
if state.Slot() != state.LatestBlockHeader().Slot {
return nil, fmt.Errorf("state slot %d not equal to latest block header slot %d", state.Slot(), state.LatestBlockHeader().Slot)
}
// assert hash_tree_root(header) == hash_tree_root(block.message)
header := state.LatestBlockHeader()
stateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, fmt.Errorf("could not get state root %w", err)
}
header.StateRoot = stateRoot[:]
headerRoot, err := header.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not get header root %w", err)
}
blockRoot, err := block.Block().HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not get block root %w", err)
}
if headerRoot != blockRoot {
return nil, fmt.Errorf("header root %#x not equal to block root %#x", headerRoot, blockRoot)
}
// assert attested_state.slot == attested_state.latest_block_header.slot
if attestedState.Slot() != attestedState.LatestBlockHeader().Slot {
return nil, fmt.Errorf("attested state slot %d not equal to attested latest block header slot %d", attestedState.Slot(), attestedState.LatestBlockHeader().Slot)
}
// attested_header = attested_state.latest_block_header.copy()
attestedHeader := attestedState.LatestBlockHeader()
// attested_header.state_root = hash_tree_root(attested_state)
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
if err != nil {
return nil, fmt.Errorf("could not get attested state root %w", err)
}
attestedHeader.StateRoot = attestedStateRoot[:]
// assert hash_tree_root(attested_header) == block.message.parent_root
attestedHeaderRoot, err := attestedHeader.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not get attested header root %w", err)
}
if attestedHeaderRoot != block.Block().ParentRoot() {
return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x", attestedHeaderRoot, block.Block().ParentRoot())
}
// Return result
attestedHeaderResult := &ethpbv1.BeaconBlockHeader{
Slot: attestedHeader.Slot,
ProposerIndex: attestedHeader.ProposerIndex,
ParentRoot: attestedHeader.ParentRoot,
StateRoot: attestedHeader.StateRoot,
BodyRoot: attestedHeader.BodyRoot,
}
syncAggregateResult := &ethpbv1.SyncAggregate{
SyncCommitteeBits: syncAggregate.SyncCommitteeBits,
SyncCommitteeSignature: syncAggregate.SyncCommitteeSignature,
}
result := &ethpbv2.LightClientUpdate{
AttestedHeader: attestedHeaderResult,
SyncAggregate: syncAggregateResult,
SignatureSlot: block.Block().Slot(),
}
return result, nil
}
func NewLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientUpdate, error) {
result, err := NewLightClientOptimisticUpdateFromBeaconState(
ctx,
state,
block,
attestedState,
)
if err != nil {
return nil, err
}
// Indicate finality whenever possible
var finalizedHeader *ethpbv1.BeaconBlockHeader
var finalityBranch [][]byte
if finalizedBlock != nil && !finalizedBlock.IsNil() {
if finalizedBlock.Block().Slot() != 0 {
tempFinalizedHeader, err := finalizedBlock.Header()
if err != nil {
return nil, fmt.Errorf("could not get finalized header %w", err)
}
finalizedHeader = migration.V1Alpha1SignedHeaderToV1(tempFinalizedHeader).GetMessage()
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not get finalized header root %w", err)
}
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
return nil, fmt.Errorf("finalized header root %#x not equal to attested finalized checkpoint root %#x", finalizedHeaderRoot, bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root))
}
} else {
if !bytes.Equal(attestedState.FinalizedCheckpoint().Root, make([]byte, 32)) {
return nil, fmt.Errorf("invalid finalized header root %v", attestedState.FinalizedCheckpoint().Root)
}
finalizedHeader = &ethpbv1.BeaconBlockHeader{
Slot: 0,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}
}
var bErr error
finalityBranch, bErr = attestedState.FinalizedRootProof(ctx)
if bErr != nil {
return nil, fmt.Errorf("could not get finalized root proof %w", bErr)
}
} else {
finalizedHeader = &ethpbv1.BeaconBlockHeader{
Slot: 0,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}
finalityBranch = make([][]byte, FinalityBranchNumOfLeaves)
for i := 0; i < FinalityBranchNumOfLeaves; i++ {
finalityBranch[i] = make([]byte, 32)
}
}
result.FinalizedHeader = finalizedHeader
result.FinalityBranch = finalityBranch
return result, nil
}
func NewLightClientUpdateFromFinalityUpdate(update *ethpbv2.LightClientFinalityUpdate) *ethpbv2.LightClientUpdate {
return &ethpbv2.LightClientUpdate{
AttestedHeader: update.AttestedHeader,
FinalizedHeader: update.FinalizedHeader,
FinalityBranch: update.FinalityBranch,
SyncAggregate: update.SyncAggregate,
SignatureSlot: update.SignatureSlot,
}
}
func NewLightClientUpdateFromOptimisticUpdate(update *ethpbv2.LightClientOptimisticUpdate) *ethpbv2.LightClientUpdate {
return &ethpbv2.LightClientUpdate{
AttestedHeader: update.AttestedHeader,
SyncAggregate: update.SyncAggregate,
SignatureSlot: update.SignatureSlot,
}
}

View File

@@ -1,160 +0,0 @@
package blockchain
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
type testlc struct {
t *testing.T
ctx context.Context
state state.BeaconState
block interfaces.ReadOnlySignedBeaconBlock
attestedState state.BeaconState
attestedHeader *ethpb.BeaconBlockHeader
}
func newTestLc(t *testing.T) *testlc {
return &testlc{t: t}
}
func (l *testlc) setupTest() *testlc {
ctx := context.Background()
slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
attestedState, err := util.NewBeaconStateCapella()
require.NoError(l.t, err)
err = attestedState.SetSlot(slot)
require.NoError(l.t, err)
parent := util.NewBeaconBlockCapella()
parent.Block.Slot = slot
signedParent, err := blocks.NewSignedBeaconBlock(parent)
require.NoError(l.t, err)
parentHeader, err := signedParent.Header()
require.NoError(l.t, err)
attestedHeader := parentHeader.Header
err = attestedState.SetLatestBlockHeader(attestedHeader)
require.NoError(l.t, err)
attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
require.NoError(l.t, err)
// get a new signed block so the root is updated with the new state root
parent.Block.StateRoot = attestedStateRoot[:]
signedParent, err = blocks.NewSignedBeaconBlock(parent)
require.NoError(l.t, err)
state, err := util.NewBeaconStateCapella()
require.NoError(l.t, err)
err = state.SetSlot(slot)
require.NoError(l.t, err)
parentRoot, err := signedParent.Block().HashTreeRoot()
require.NoError(l.t, err)
block := util.NewBeaconBlockCapella()
block.Block.Slot = slot
block.Block.ParentRoot = parentRoot[:]
for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
}
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(l.t, err)
h, err := signedBlock.Header()
require.NoError(l.t, err)
err = state.SetLatestBlockHeader(h.Header)
require.NoError(l.t, err)
stateRoot, err := state.HashTreeRoot(ctx)
require.NoError(l.t, err)
// get a new signed block so the root is updated with the new state root
block.Block.StateRoot = stateRoot[:]
signedBlock, err = blocks.NewSignedBeaconBlock(block)
require.NoError(l.t, err)
l.state = state
l.attestedState = attestedState
l.attestedHeader = attestedHeader
l.block = signedBlock
l.ctx = ctx
return l
}
func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")
attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
require.NoError(l.t, err)
require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
}
func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
syncAggregate, err := l.block.Block().Body().SyncAggregate()
require.NoError(l.t, err)
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
}
func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
l := newTestLc(t).setupTest()
update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
l.checkSyncAggregate(update)
l.checkAttestedHeader(update)
require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
}
func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
l := newTestLc(t).setupTest()
update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
require.NoError(t, err)
require.NotNil(t, update, "update is nil")
require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
l.checkSyncAggregate(update)
l.checkAttestedHeader(update)
zeroHash := params.BeaconConfig().ZeroHash[:]
require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
require.Equal(t, FinalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
for _, leaf := range update.FinalityBranch {
require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
}
}

View File

@@ -182,6 +182,10 @@ var (
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
})
executionEngineProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "execution_engine_processing_milliseconds",
Help: "Total time to process an execution payload envelope in ReceiveExecutionPayloadEnvelope()",
})
dataAvailWaitedTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "da_waited_time_milliseconds",
Help: "Total time spent waiting for a data availability check in ReceiveBlock()",

View File

@@ -1,6 +1,8 @@
package blockchain
import (
"sync"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -69,6 +71,22 @@ func WithDepositCache(c cache.DepositCache) Option {
}
}
// WithPayloadAttestationCache for payload attestation cache.
func WithPayloadAttestationCache(c *cache.PayloadAttestationCache) Option {
return func(s *Service) error {
s.cfg.PayloadAttestationCache = c
return nil
}
}
// WithPayloadEnvelopeCache for payload envelope cache.
func WithPayloadEnvelopeCache(c *sync.Map) Option {
return func(s *Service) error {
s.cfg.PayloadEnvelopeCache = c
return nil
}
}
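Both new options follow the functional-option pattern already used by the blockchain service: each returns a closure that stores the supplied cache on the service config at construction time. A self-contained sketch of the pattern with stand-in types (not the real blockchain.Service or its constructor):

package main

import (
    "fmt"
    "sync"
)

// config and Service are stand-ins mirroring the option pattern above.
type config struct {
    PayloadEnvelopeCache *sync.Map
}

type Service struct{ cfg *config }

// Option mutates the service during construction, matching the shape of
// WithPayloadEnvelopeCache and friends.
type Option func(*Service) error

func WithPayloadEnvelopeCache(c *sync.Map) Option {
    return func(s *Service) error {
        s.cfg.PayloadEnvelopeCache = c
        return nil
    }
}

func newService(opts ...Option) (*Service, error) {
    s := &Service{cfg: &config{}}
    for _, o := range opts {
        if err := o(s); err != nil {
            return nil, err
        }
    }
    return s, nil
}

func main() {
    s, err := newService(WithPayloadEnvelopeCache(&sync.Map{}))
    if err != nil {
        panic(err)
    }
    fmt.Println(s.cfg.PayloadEnvelopeCache != nil) // true
}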
// WithPayloadIDCache for payload ID cache.
func WithPayloadIDCache(c *cache.PayloadIDCache) Option {
return func(s *Service) error {

View File

@@ -46,7 +46,7 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.ReadOnlyS
if err != nil {
return err
}
if payload.IsNil() {
if payload == nil || payload.IsNil() {
return errors.New("nil execution payload")
}
ok, err := canUseValidatedTerminalBlockHash(b.Block().Slot(), payload)

View File

@@ -7,10 +7,10 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
// OnAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
@@ -97,7 +97,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// We assume the attestation passed to this function is trusted and its signature has been verified.
// Update forkchoice store with the new attestation for updating weight.
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Slot)
return nil
}

View File

@@ -25,12 +25,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// A custom slot deadline for processing state slots in our cache.
@@ -142,7 +142,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
b := blks[0].Block()
// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
return err
}
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
@@ -375,7 +375,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
}
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Slot)
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
return err
}

View File

@@ -5,6 +5,8 @@ import (
"fmt"
"time"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -14,16 +16,17 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// CurrentSlot returns the current slot based on time.
@@ -175,7 +178,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
}
}
update, err := NewLightClientFinalityUpdateFromBeaconState(
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
ctx,
postState,
signed,
@@ -190,7 +193,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
// Return the result
result := &ethpbv2.LightClientFinalityUpdateWithVersion{
Version: ethpbv2.Version(signed.Version()),
Data: CreateLightClientFinalityUpdate(update),
Data: update,
}
// Send event
@@ -210,7 +213,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
return 0, errors.Wrap(err, "could not get attested state")
}
update, err := NewLightClientOptimisticUpdateFromBeaconState(
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
ctx,
postState,
signed,
@@ -224,7 +227,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
// Return the result
result := &ethpbv2.LightClientOptimisticUpdateWithVersion{
Version: ethpbv2.Version(signed.Version()),
Data: CreateLightClientOptimisticUpdate(update),
Data: update,
}
return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
@@ -285,7 +288,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
defer span.End()
// Verify incoming block has a valid pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
return nil, err
}
@@ -311,11 +314,10 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
}
// verifyBlkPreState validates that the input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) error {
func (s *Service) verifyBlkPreState(ctx context.Context, parentRoot [field_params.RootLength]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
defer span.End()
parentRoot := b.ParentRoot()
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// subset of the block object.

View File

@@ -117,7 +117,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: root[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, root, st))
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block().ParentRoot()))
}
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
@@ -2044,7 +2044,11 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
defaultConfig := util.DefaultBlockGenConfig()
defaultConfig.NumWithdrawalRequests = 1
defaultConfig.NumDepositRequests = 2
defaultConfig.NumConsolidationRequests = 1
b, err := util.GenerateFullBlockElectra(st, keys, defaultConfig, 1)
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -2059,7 +2063,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
b, err = util.GenerateFullBlockElectra(st, keys, defaultConfig, 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
@@ -2067,7 +2071,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
b3, err := util.GenerateFullBlockElectra(st3, keys, defaultConfig, 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)

View File

@@ -12,10 +12,11 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// reorgLateBlockCountAttestations is the time until the end of the slot in which we count
@@ -176,7 +177,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
}
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
hasBlock := s.chainHasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}
@@ -190,13 +191,26 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
}
if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
log.WithFields(logrus.Fields{
"slot": a.GetData().Slot,
"committeeIndex": a.GetData().CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
"aggregationCount": a.GetAggregationBits().Count(),
}).WithError(err).Warn("Could not process attestation for fork choice")
var fields logrus.Fields
if a.Version() >= version.Electra {
fields = logrus.Fields{
"slot": a.GetData().Slot,
"committeeCount": a.CommitteeBitsVal().Count(),
"committeeIndices": a.CommitteeBitsVal().BitIndices(),
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
"aggregatedCount": a.GetAggregationBits().Count(),
}
} else {
fields = logrus.Fields{
"slot": a.GetData().Slot,
"committeeIndex": a.GetData().CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
"aggregatedCount": a.GetAggregationBits().Count(),
}
}
log.WithFields(fields).WithError(err).Warn("Could not process attestation for fork choice")
}
}
}

View File

@@ -21,12 +21,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
)
@@ -39,18 +39,30 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
ReceiveExecutionPayloadEnvelope(ctx context.Context, env interfaces.ROExecutionPayloadEnvelope, avs das.AvailabilityStore) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
}
// PayloadAttestationReceiver defines methods of the chain service for receiving
// and processing new payload attestations and payload attestation messages
type PayloadAttestationReceiver interface {
ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error
}
// BlobReceiver interface defines the methods of chain service for receiving new
// blobs
type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}
// ExecutionPayloadReceiver interface defines the methods of chain service for receiving `ROExecutionPayloadEnvelope`.
type ExecutionPayloadReceiver interface {
ReceiveExecutionPayloadEnvelope(ctx context.Context, envelope interfaces.ROExecutionPayloadEnvelope, _ das.AvailabilityStore) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -77,59 +89,20 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err != nil {
return err
}
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
currentEpoch := coreTime.CurrentEpoch(preState)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
currentCheckpoints := s.saveCurrentCheckpoints(preState)
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
if err != nil {
return err
}
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
if err != nil {
return err
}
daStartTime := time.Now()
if avs != nil {
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
return errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
}
} else {
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
return errors.Wrap(err, "could not validate blob data availability")
}
}
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
// Defragment the state before continuing block processing.
s.defragmentState(postState)
@@ -151,29 +124,9 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
tracing.AnnotateError(span, err)
return err
}
if coreTime.CurrentEpoch(postState) > currentEpoch && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
return err
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
// hook to process all post state finalization tasks
s.executePostFinalizationTasks(ctx, postState)
}
// If slasher is configured, forward the attestations in the block via an event feed for processing.
if features.Get().EnableSlasher {
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
@@ -193,31 +146,140 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.handleCaches(); err != nil {
return err
}
s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
return nil
}
type ffgCheckpoints struct {
j, f, c primitives.Epoch
}
func (s *Service) saveCurrentCheckpoints(state state.BeaconState) (cp ffgCheckpoints) {
// Save current justified and finalized epochs for future use.
cp.j = s.CurrentJustifiedCheckpt().Epoch
cp.f = s.FinalizedCheckpt().Epoch
cp.c = coreTime.CurrentEpoch(state)
return
}
func (s *Service) updateCheckpoints(
ctx context.Context,
cp ffgCheckpoints,
preState, postState state.BeaconState,
blockRoot [32]byte,
) error {
if coreTime.CurrentEpoch(postState) > cp.c && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, cp.f)
if err != nil {
return errors.Wrap(err, "could not update finalized checkpoint")
}
// Send finalized events and finalized deposits in the background
if newFinalized {
// hook to process all post state finalization tasks
s.executePostFinalizationTasks(ctx, postState)
}
return nil
}
func (s *Service) validateExecutionAndConsensus(
ctx context.Context,
preState state.BeaconState,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
) (state.BeaconState, bool, error) {
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return nil, false, err
}
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, block)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return nil, false, err
}
return postState, isValidPayload, nil
}
func (s *Service) handleDA(
ctx context.Context,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
avs das.AvailabilityStore,
) (time.Duration, error) {
daStartTime := time.Now()
if avs != nil {
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return 0, err
}
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
}
} else {
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability")
}
}
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
return daWaitedTime, nil
}
func (s *Service) reportPostBlockProcessing(
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
receivedTime time.Time,
daWaitedTime time.Duration,
) {
// Reports on block and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
if err := logPayload(blockCopy.Block()); err != nil {
if err := logPayload(block.Block()); err != nil {
log.WithError(err).Error("Unable to log debug block payload data")
}
// Log state transition data.
if err := logStateTransitionData(blockCopy.Block()); err != nil {
if err := logStateTransitionData(block.Block()); err != nil {
log.WithError(err).Error("Unable to log state transition data")
}
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
return nil
}
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {

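The validateExecutionAndConsensus helper above runs the consensus state transition and the engine's new-payload notification concurrently and surfaces the first error. A minimal, standalone sketch of that errgroup pattern follows; the task bodies are generic stand-ins, not Prysm APIs.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	// Launch both validation tasks; Wait returns the first non-nil error, if any.
	eg, _ := errgroup.WithContext(context.Background())

	var postState string // stand-in for the consensus state-transition result
	eg.Go(func() error {
		postState = "post-state"
		return nil
	})

	var validPayload bool // stand-in for the engine's newPayload verdict
	eg.Go(func() error {
		validPayload = true
		return nil
	})

	if err := eg.Wait(); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println(postState, validPayload)
}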
View File

@@ -0,0 +1,152 @@
package blockchain
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epbs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
)
// ReceiveExecutionPayloadEnvelope defines the operations (minus pubsub)
// performed on a received execution payload envelope. The operations consist of:
// 1. Validate the payload and apply the state transition.
// 2. Apply fork choice to the processed payload.
// 3. Save the latest head info.
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, envelope interfaces.ROExecutionPayloadEnvelope, _ das.AvailabilityStore) error {
receivedTime := time.Now()
root := envelope.BeaconBlockRoot()
s.payloadBeingSynced.set(envelope)
defer s.payloadBeingSynced.unset(root)
preState, err := s.getPayloadEnvelopePrestate(ctx, envelope)
if err != nil {
return errors.Wrap(err, "could not get prestate")
}
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
if err := epbs.ValidatePayloadStateTransition(ctx, preState, envelope); err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnEnvelope(ctx, envelope)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
_ = isValidPayload
_ = postState
daStartTime := time.Now()
// TODO: Add DA check
daWaitedTime := time.Since(daStartTime)
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
// TODO: Add Head update, cache handling, postProcessing
if err := s.insertPayloadEnvelope(envelope); err != nil {
return errors.Wrap(err, "could not insert payload to forkchoice")
}
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
executionEngineProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
return nil
}
// notifyNewEnvelope signals the execution engine about a new payload envelope.
// It returns true if the EL has returned VALID for the payload.
func (s *Service) notifyNewEnvelope(ctx context.Context, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
payload, err := envelope.Execution()
if err != nil {
return false, errors.Wrap(err, "could not get execution payload")
}
versionedHashes := envelope.VersionedHashes()
root := envelope.BeaconBlockRoot()
parentRoot, err := s.ParentRoot(root)
if err != nil {
return false, errors.Wrap(err, "could not get parent block root")
}
pr := common.Hash(parentRoot)
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
switch {
case err == nil:
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
// validateExecutionOnEnvelope notifies the engine of the incoming execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnEnvelope(ctx context.Context, e interfaces.ROExecutionPayloadEnvelope) (bool, error) {
isValidPayload, err := s.notifyNewEnvelope(ctx, e)
if err == nil {
return isValidPayload, nil
}
blockRoot := e.BeaconBlockRoot()
parentRoot, rootErr := s.ParentRoot(blockRoot)
if rootErr != nil {
return false, errors.Wrap(rootErr, "could not get parent block root")
}
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, parentRoot)
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, e interfaces.ROExecutionPayloadEnvelope) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getPayloadEnvelopePreState")
defer span.End()
// Verify incoming payload has a valid pre state.
root := e.BeaconBlockRoot()
// Verify the referred block is known to forkchoice
if !s.InForkchoice(root) {
return nil, errors.New("Cannot import execution payload envelope for unknown block")
}
if err := s.verifyBlkPreState(ctx, root); err != nil {
return nil, errors.Wrap(err, "could not verify payload prestate")
}
preState, err := s.cfg.StateGen.StateByRoot(ctx, root)
if err != nil {
return nil, errors.Wrap(err, "could not get pre state")
}
if preState == nil || preState.IsNil() {
return nil, errors.New("nil pre state")
}
return preState, nil
}

View File

@@ -0,0 +1,99 @@
package blockchain
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/testing/util/random"
)
func Test_getPayloadEnvelopePrestate(t *testing.T) {
service, tr := minimalTestService(t)
ctx, fcs := tr.ctx, tr.fcs
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
p := random.ExecutionPayloadEnvelope(t)
p.BeaconBlockRoot = service.originBlockRoot[:]
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
require.NoError(t, err)
_, err = service.getPayloadEnvelopePrestate(ctx, e)
require.NoError(t, err)
}
func Test_notifyNewEnvelope(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx, fcs := tr.ctx, tr.fcs
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
p := random.ExecutionPayloadEnvelope(t)
p.BeaconBlockRoot = service.originBlockRoot[:]
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
require.NoError(t, err)
engine := &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = engine
isValidPayload, err := service.notifyNewEnvelope(ctx, e)
require.NoError(t, err)
require.Equal(t, true, isValidPayload)
}
func Test_validateExecutionOnEnvelope(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx, fcs := tr.ctx, tr.fcs
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
p := random.ExecutionPayloadEnvelope(t)
p.BeaconBlockRoot = service.originBlockRoot[:]
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
require.NoError(t, err)
engine := &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = engine
isValidPayload, err := service.validateExecutionOnEnvelope(ctx, e)
require.NoError(t, err)
require.Equal(t, true, isValidPayload)
}
func Test_ReceiveExecutionPayloadEnvelope(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx, fcs := tr.ctx, tr.fcs
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
post := gs.Copy()
p := &enginev1.ExecutionPayloadEnvelope{
Payload: &enginev1.ExecutionPayloadElectra{
ParentHash: make([]byte, 32),
BlockHash: make([]byte, 32),
},
BeaconBlockRoot: service.originBlockRoot[:],
BlobKzgCommitments: make([][]byte, 0),
StateRoot: make([]byte, 32),
}
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
require.NoError(t, err)
das := &das.MockAvailabilityStore{}
blockHeader := post.LatestBlockHeader()
prevStateRoot, err := post.HashTreeRoot(ctx)
require.NoError(t, err)
blockHeader.StateRoot = prevStateRoot[:]
require.NoError(t, post.SetLatestBlockHeader(blockHeader))
stRoot, err := post.HashTreeRoot(ctx)
require.NoError(t, err)
p.StateRoot = stRoot[:]
engine := &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = engine
require.NoError(t, service.ReceiveExecutionPayloadEnvelope(ctx, e, das))
}

View File

@@ -0,0 +1,33 @@
package blockchain
import (
"context"
"slices"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *eth.PayloadAttestationMessage) error {
if err := helpers.ValidateNilPayloadAttestationMessage(a); err != nil {
return err
}
root := [32]byte(a.Data.BeaconBlockRoot)
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return err
}
ptc, err := helpers.GetPayloadTimelinessCommittee(ctx, st, a.Data.Slot)
if err != nil {
return err
}
idx := slices.Index(ptc, a.ValidatorIndex)
if idx == -1 {
return errInvalidValidatorIndex
}
if s.cfg.PayloadAttestationCache.Seen(root, uint64(primitives.ValidatorIndex(idx))) {
return nil
}
return s.cfg.PayloadAttestationCache.Add(a, uint64(idx))
}

View File

@@ -39,10 +39,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
// Service represents a service that handles the internal
@@ -65,6 +65,7 @@ type Service struct {
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
payloadBeingSynced *currentlySyncingPayload
blobStorage *filesystem.BlobStorage
lastPublishedLightClientEpoch primitives.Epoch
}
@@ -75,6 +76,8 @@ type config struct {
ChainStartFetcher execution.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache cache.DepositCache
PayloadAttestationCache *cache.PayloadAttestationCache
PayloadEnvelopeCache *sync.Map
PayloadIDCache *cache.PayloadIDCache
TrackedValidatorsCache *cache.TrackedValidatorsCache
AttPool attestations.Pool
@@ -179,6 +182,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
blobNotifiers: bn,
cfg: &config{},
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
payloadBeingSynced: &currentlySyncingPayload{roots: make(map[[32]byte]primitives.PTCStatus)},
}
for _, opt := range opts {
if err := opt(srv); err != nil {
@@ -544,7 +548,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
// this function requires a lock in forkchoice
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
func (s *Service) chainHasBlock(ctx context.Context, root [32]byte) bool {
if s.cfg.ForkChoiceStore.HasNode(root) {
return true
}

View File

@@ -382,8 +382,8 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
assert.Equal(t, false, s.chainHasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.chainHasBlock(ctx, r), "Should have block")
}
func TestServiceStop_SaveCachedBlocks(t *testing.T) {

View File

@@ -3,7 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
testonly = True,
srcs = ["mock.go"],
srcs = [
"mock.go",
"mock_epbs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing",
visibility = [
"//beacon-chain:__subpackages__",

View File

@@ -37,44 +37,50 @@ var ErrNilState = errors.New("nil state")
// ChainService defines the mock interface for testing
type ChainService struct {
NotFinalized bool
Optimistic bool
ValidAttestation bool
ValidatorsRoot [32]byte
PublicKey [fieldparams.BLSPubkeyLength]byte
FinalizedCheckPoint *ethpb.Checkpoint
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
Slot *primitives.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
Balance *precompute.Balance
CanonicalRoots map[[32]byte]bool
Fork *ethpb.Fork
ETH1Data *ethpb.Eth1Data
InitSyncBlockRoots map[[32]byte]bool
DB db.Database
State state.BeaconState
Block interfaces.ReadOnlySignedBeaconBlock
VerifyBlkDescendantErr error
stateNotifier statefeed.Notifier
BlocksReceived []interfaces.ReadOnlySignedBeaconBlock
SyncCommitteeIndices []primitives.CommitteeIndex
blockNotifier blockfeed.Notifier
opNotifier opfeed.Notifier
Root []byte
SyncCommitteeDomain []byte
SyncSelectionProofDomain []byte
SyncContributionProofDomain []byte
SyncCommitteePubkeys [][]byte
Genesis time.Time
ForkChoiceStore forkchoice.ForkChoicer
ReceiveBlockMockErr error
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
OptimisticRoots map[[32]byte]bool
BlockSlot primitives.Slot
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
TargetRoot [32]byte
NotFinalized bool
Optimistic bool
ValidAttestation bool
ValidatorsRoot [32]byte
PublicKey [fieldparams.BLSPubkeyLength]byte
FinalizedCheckPoint *ethpb.Checkpoint
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
Slot *primitives.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
Balance *precompute.Balance
CanonicalRoots map[[32]byte]bool
Fork *ethpb.Fork
ETH1Data *ethpb.Eth1Data
InitSyncBlockRoots map[[32]byte]bool
DB db.Database
State state.BeaconState
Block interfaces.ReadOnlySignedBeaconBlock
ExecutionPayloadEnvelope interfaces.ROExecutionPayloadEnvelope
VerifyBlkDescendantErr error
stateNotifier statefeed.Notifier
BlocksReceived []interfaces.ReadOnlySignedBeaconBlock
SyncCommitteeIndices []primitives.CommitteeIndex
blockNotifier blockfeed.Notifier
opNotifier opfeed.Notifier
Root []byte
SyncCommitteeDomain []byte
SyncSelectionProofDomain []byte
SyncContributionProofDomain []byte
SyncCommitteePubkeys [][]byte
Genesis time.Time
ForkChoiceStore forkchoice.ForkChoicer
ReceiveBlockMockErr error
ReceiveEnvelopeMockErr error
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
OptimisticRoots map[[32]byte]bool
BlockSlot primitives.Slot
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
TargetRoot [32]byte
HighestReceivedSlot primitives.Slot
HighestReceivedRoot [32]byte
PayloadStatus primitives.PTCStatus
ReceivePayloadAttestationMessageErr error
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -567,12 +573,12 @@ func (s *ChainService) ReceivedBlocksLastEpoch() (uint64, error) {
return 0, nil
}
// HighestReceivedBlockSlot mocks the same method in the chain service
func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
// HighestReceivedBlockSlotRoot mocks the same method in the chain service
func (s *ChainService) HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte) {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.HighestReceivedBlockSlot()
return s.ForkChoiceStore.HighestReceivedBlockSlotRoot()
}
return 0
return s.HighestReceivedSlot, s.HighestReceivedRoot
}
// InsertNode mocks the same method in the chain service
@@ -632,3 +638,17 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil
}
// HashInForkchoice mocks the same method in the chain service
func (c *ChainService) HashInForkchoice([32]byte) bool {
return false
}
// ReceivePayloadAttestationMessage mocks the same method in the chain service
func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *ethpb.PayloadAttestationMessage) error {
return c.ReceivePayloadAttestationMessageErr
}
func (c *ChainService) GetPTCVote(root [32]byte) primitives.PTCStatus {
return c.PayloadStatus
}

View File

@@ -0,0 +1,25 @@
package testing
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)
// ReceiveExecutionPayloadEnvelope mocks the method in chain service.
func (s *ChainService) ReceiveExecutionPayloadEnvelope(ctx context.Context, env interfaces.ROExecutionPayloadEnvelope, _ das.AvailabilityStore) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}
if s.State == nil {
return ErrNilState
}
if s.State.Slot() == env.Slot() {
if err := s.State.SetLatestFullSlot(s.State.Slot()); err != nil {
return err
}
}
s.ExecutionPayloadEnvelope = env
return nil
}

View File

@@ -19,6 +19,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -26,7 +27,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -14,10 +14,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// ErrNoBuilder is used when builder endpoint is not configured.

View File

@@ -15,11 +15,13 @@ go_library(
"doc.go",
"error.go",
"interfaces.go",
"payload_attestation.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
"proposer_indices_type.go",
"registration.go",
"signed_execution_header.go",
"skip_slot_cache.go",
"subnet_ids.go",
"sync_committee.go",
@@ -42,10 +44,13 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
@@ -56,7 +61,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
@@ -70,10 +74,12 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"payload_attestation_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
"registration_test.go",
"signed_execution_header_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"sync_committee_head_state_test.go",
@@ -88,7 +94,9 @@ go_test(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}
// popProcessNoopFunc is a no-op function that never returns an error.
func popProcessNoopFunc(_ interface{}) error {
func popProcessNoopFunc(_ interface{}, _ bool) error {
return nil
}

View File

@@ -19,6 +19,7 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -26,7 +27,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -10,10 +10,10 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"github.com/wealdtech/go-bytesutil"
"go.opencensus.io/trace"
)
var (
@@ -241,7 +241,7 @@ func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, bloc
c.pendingDeposits = append(c.pendingDeposits,
&ethpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
span.AddAttributes(trace.Int64Attribute("count", int64(len(c.pendingDeposits))))
span.SetAttributes(trace.Int64Attribute("count", int64(len(c.pendingDeposits))))
}
// Deposits returns the cached internal deposit tree.
@@ -304,7 +304,7 @@ func (c *Cache) PendingContainers(ctx context.Context, untilBlk *big.Int) []*eth
return depositCntrs[i].Index < depositCntrs[j].Index
})
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
span.SetAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
return depositCntrs
}

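The hunks above (and the similar ones that follow) migrate spans from the OpenCensus package to Prysm's monitoring/tracing/trace wrapper and rename span.AddAttributes to span.SetAttributes. A hedged sketch of the resulting call pattern, assuming the wrapper exposes the StartSpan and attribute helpers exactly as shown in these diffs:

package cacheexample

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
)

// recordCount shows the post-migration span pattern: StartSpan from the Prysm
// wrapper, then SetAttributes where OpenCensus code previously called AddAttributes.
func recordCount(ctx context.Context, n int64) {
	_, span := trace.StartSpan(ctx, "cache.recordCount")
	defer span.End()

	span.SetAttributes(trace.Int64Attribute("count", n))
}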
View File

@@ -10,9 +10,9 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
var (

View File

@@ -0,0 +1,132 @@
package cache
import (
"errors"
"sync"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
var errNilPayloadAttestationMessage = errors.New("nil Payload Attestation Message")
// PayloadAttestationCache keeps the aggregated PTC votes seen for a single
// beacon block root, one aggregate per payload status.
type PayloadAttestationCache struct {
root [32]byte
attestations [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation
sync.Mutex
}
// Seen returns true if a vote for the given Beacon Block Root has already been processed
// for this Payload Timeliness Committee index. This will return true even if
// the Payload status differs.
func (p *PayloadAttestationCache) Seen(root [32]byte, idx uint64) bool {
p.Lock()
defer p.Unlock()
if p.root != root {
return false
}
for _, agg := range p.attestations {
if agg == nil {
continue
}
if agg.AggregationBits.BitAt(idx) {
return true
}
}
return false
}
// messageToPayloadAttestation creates a PayloadAttestation with a single
// aggregation bit set from the passed PayloadAttestationMessage.
func messageToPayloadAttestation(att *eth.PayloadAttestationMessage, idx uint64) *eth.PayloadAttestation {
bits := primitives.NewPayloadAttestationAggregationBits()
bits.SetBitAt(idx, true)
data := &eth.PayloadAttestationData{
BeaconBlockRoot: bytesutil.SafeCopyBytes(att.Data.BeaconBlockRoot),
Slot: att.Data.Slot,
PayloadStatus: att.Data.PayloadStatus,
}
return &eth.PayloadAttestation{
AggregationBits: bits,
Data: data,
Signature: bytesutil.SafeCopyBytes(att.Signature),
}
}
// aggregateSigFromMessage returns the aggregated signature of a PayloadAttestation
// after adding the signature from the passed PayloadAttestationMessage;
// no signature validation is performed.
func aggregateSigFromMessage(aggregated *eth.PayloadAttestation, message *eth.PayloadAttestationMessage) ([]byte, error) {
aggSig, err := bls.SignatureFromBytesNoValidation(aggregated.Signature)
if err != nil {
return nil, err
}
sig, err := bls.SignatureFromBytesNoValidation(message.Signature)
if err != nil {
return nil, err
}
return bls.AggregateSignatures([]bls.Signature{aggSig, sig}).Marshal(), nil
}
// Add adds a PayloadAttestationMessage to the internal cache of aggregated
// PayloadAttestations.
// If the index has already been seen for this attestation status, the function does nothing.
// If the root is not the cached root, the function clears the previous cache.
// This function assumes that the message has already been validated. In
// particular that the signature is valid and that the block root corresponds to
// the given slot in the attestation data.
func (p *PayloadAttestationCache) Add(att *eth.PayloadAttestationMessage, idx uint64) error {
if att == nil || att.Data == nil || att.Data.BeaconBlockRoot == nil {
return errNilPayloadAttestationMessage
}
p.Lock()
defer p.Unlock()
root := [32]byte(att.Data.BeaconBlockRoot)
if p.root != root {
p.root = root
p.attestations = [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{}
}
agg := p.attestations[att.Data.PayloadStatus]
if agg == nil {
p.attestations[att.Data.PayloadStatus] = messageToPayloadAttestation(att, idx)
return nil
}
if agg.AggregationBits.BitAt(idx) {
return nil
}
sig, err := aggregateSigFromMessage(agg, att)
if err != nil {
return err
}
agg.Signature = sig
agg.AggregationBits.SetBitAt(idx, true)
return nil
}
// Get returns the aggregated PayloadAttestation for the given root and status.
// If the root is not cached or the status is invalid, the function returns nil.
func (p *PayloadAttestationCache) Get(root [32]byte, status primitives.PTCStatus) *eth.PayloadAttestation {
p.Lock()
defer p.Unlock()
if p.root != root {
return nil
}
if status >= primitives.PAYLOAD_INVALID_STATUS {
return nil
}
return eth.CopyPayloadAttestation(p.attestations[status])
}
// Clear resets the cached root and the aggregated attestations.
func (p *PayloadAttestationCache) Clear() {
p.Lock()
defer p.Unlock()
p.root = [32]byte{}
p.attestations = [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{}
}

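For orientation, a short usage sketch of this cache, mirroring the unit test that follows; the root and PTC index values are illustrative only.

package cache

import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// demoPayloadAttestationCache walks through the cache API above.
func demoPayloadAttestationCache() (*eth.PayloadAttestation, error) {
	c := &PayloadAttestationCache{}
	root := [32]byte{'r'}
	idx := uint64(5)

	msg := &eth.PayloadAttestationMessage{
		Signature: bls.NewAggregateSignature().Marshal(),
		Data: &eth.PayloadAttestationData{
			BeaconBlockRoot: root[:],
			Slot:            1,
			PayloadStatus:   primitives.PAYLOAD_PRESENT,
		},
	}

	// Skip duplicate votes for this PTC index, otherwise aggregate the message.
	if !c.Seen(root, idx) {
		if err := c.Add(msg, idx); err != nil {
			return nil, err
		}
	}

	// Retrieve the aggregated attestation for the cached root and status.
	return c.Get(root, primitives.PAYLOAD_PRESENT), nil
}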
View File

@@ -0,0 +1,143 @@
package cache
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestPayloadAttestationCache(t *testing.T) {
p := &PayloadAttestationCache{}
// Test Seen before any vote is added
root := [32]byte{'r'}
idx := uint64(5)
require.Equal(t, false, p.Seen(root, idx))
// Test Add
msg := &eth.PayloadAttestationMessage{
Signature: bls.NewAggregateSignature().Marshal(),
Data: &eth.PayloadAttestationData{
BeaconBlockRoot: root[:],
Slot: 1,
PayloadStatus: primitives.PAYLOAD_PRESENT,
},
}
// Add new root
require.NoError(t, p.Add(msg, idx))
require.Equal(t, true, p.Seen(root, idx))
require.Equal(t, root, p.root)
att := p.attestations[primitives.PAYLOAD_PRESENT]
indices := att.AggregationBits.BitIndices()
require.DeepEqual(t, []int{int(idx)}, indices)
singleSig := bytesutil.SafeCopyBytes(msg.Signature)
require.DeepEqual(t, singleSig, att.Signature)
// Test Seen
require.Equal(t, true, p.Seen(root, idx))
require.Equal(t, false, p.Seen(root, idx+1))
// Add another attestation on the same data
msg2 := &eth.PayloadAttestationMessage{
Signature: bls.NewAggregateSignature().Marshal(),
Data: att.Data,
}
idx2 := uint64(7)
require.NoError(t, p.Add(msg2, idx2))
att = p.attestations[primitives.PAYLOAD_PRESENT]
indices = att.AggregationBits.BitIndices()
require.DeepEqual(t, []int{int(idx), int(idx2)}, indices)
require.DeepNotEqual(t, att.Signature, msg.Signature)
// Try again the same index
require.NoError(t, p.Add(msg2, idx2))
att2 := p.attestations[primitives.PAYLOAD_PRESENT]
indices = att.AggregationBits.BitIndices()
require.DeepEqual(t, []int{int(idx), int(idx2)}, indices)
require.DeepEqual(t, att, att2)
// Test Seen
require.Equal(t, true, p.Seen(root, idx2))
require.Equal(t, false, p.Seen(root, idx2+1))
// Add another attestation with a different payload status
msg3 := &eth.PayloadAttestationMessage{
Signature: bls.NewAggregateSignature().Marshal(),
Data: &eth.PayloadAttestationData{
BeaconBlockRoot: root[:],
Slot: 1,
PayloadStatus: primitives.PAYLOAD_WITHHELD,
},
}
idx3 := uint64(17)
require.NoError(t, p.Add(msg3, idx3))
att3 := p.attestations[primitives.PAYLOAD_WITHHELD]
indices3 := att3.AggregationBits.BitIndices()
require.DeepEqual(t, []int{int(idx3)}, indices3)
require.DeepEqual(t, singleSig, att3.Signature)
// Add a different root
root2 := [32]byte{'s'}
msg.Data.BeaconBlockRoot = root2[:]
require.NoError(t, p.Add(msg, idx))
require.Equal(t, root2, p.root)
require.Equal(t, true, p.Seen(root2, idx))
require.Equal(t, false, p.Seen(root, idx))
att = p.attestations[primitives.PAYLOAD_PRESENT]
indices = att.AggregationBits.BitIndices()
require.DeepEqual(t, []int{int(idx)}, indices)
}
func TestPayloadAttestationCache_Get(t *testing.T) {
root := [32]byte{1, 2, 3}
wrongRoot := [32]byte{4, 5, 6}
status := primitives.PAYLOAD_PRESENT
invalidStatus := primitives.PAYLOAD_INVALID_STATUS
cache := &PayloadAttestationCache{
root: root,
attestations: [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{
{
Signature: []byte{1},
},
{
Signature: []byte{2},
},
{
Signature: []byte{3},
},
},
}
t.Run("valid root and status", func(t *testing.T) {
result := cache.Get(root, status)
require.NotNil(t, result, "Expected a non-nil result")
require.DeepEqual(t, cache.attestations[status], result)
})
t.Run("invalid root", func(t *testing.T) {
result := cache.Get(wrongRoot, status)
require.IsNil(t, result)
})
t.Run("status out of bound", func(t *testing.T) {
result := cache.Get(root, invalidStatus)
require.IsNil(t, result)
})
t.Run("no attestation", func(t *testing.T) {
emptyCache := &PayloadAttestationCache{
root: root,
attestations: [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{},
}
result := emptyCache.Get(root, status)
require.IsNil(t, result)
})
}

View File

@@ -7,8 +7,8 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
)
// RegistrationCache is used to store the cached results of an Validator Registration request.

View File

@@ -0,0 +1,76 @@
package cache
import (
"bytes"
"sync"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)
// ExecutionPayloadHeaders is used by the sync service to store signed execution payload headers after they pass validation,
// and to filter out subsequent headers with a lower value.
// A signed header from this cache can be used by the proposer when proposing for the next slot.
type ExecutionPayloadHeaders struct {
headers map[primitives.Slot][]*enginev1.SignedExecutionPayloadHeader
sync.RWMutex
}
func NewExecutionPayloadHeaders() *ExecutionPayloadHeaders {
return &ExecutionPayloadHeaders{
headers: make(map[primitives.Slot][]*enginev1.SignedExecutionPayloadHeader),
}
}
// SaveSignedExecutionPayloadHeader saves the signed execution payload header to the cache.
// The cache keeps headers for at most two slots: saving a header for slot N evicts any cached slot older than N-1.
// For a given parent block hash and parent block root within a slot, only the highest-value header is kept.
// This function assumes the caller has already checked that the header's slot is the current or next slot; it performs no slot validation itself.
func (c *ExecutionPayloadHeaders) SaveSignedExecutionPayloadHeader(header *enginev1.SignedExecutionPayloadHeader) {
c.Lock()
defer c.Unlock()
for s := range c.headers {
if s+1 < header.Message.Slot {
delete(c.headers, s)
}
}
// Add or update the header in the map
if _, ok := c.headers[header.Message.Slot]; !ok {
c.headers[header.Message.Slot] = []*enginev1.SignedExecutionPayloadHeader{header}
} else {
found := false
for i, h := range c.headers[header.Message.Slot] {
if bytes.Equal(h.Message.ParentBlockHash, header.Message.ParentBlockHash) && bytes.Equal(h.Message.ParentBlockRoot, header.Message.ParentBlockRoot) {
if header.Message.Value > h.Message.Value {
c.headers[header.Message.Slot][i] = header
}
found = true
break
}
}
if !found {
c.headers[header.Message.Slot] = append(c.headers[header.Message.Slot], header)
}
}
}
// SignedExecutionPayloadHeader returns the signed payload header for the given slot, parent block hash, and parent block root.
// It returns nil if no matching header is found.
// This should be used when the caller needs the header to match both the parent block hash and the parent block root, such as a proposer choosing a header to propose.
func (c *ExecutionPayloadHeaders) SignedExecutionPayloadHeader(slot primitives.Slot, parentBlockHash []byte, parentBlockRoot []byte) *enginev1.SignedExecutionPayloadHeader {
c.RLock()
defer c.RUnlock()
if headers, ok := c.headers[slot]; ok {
for _, header := range headers {
if bytes.Equal(header.Message.ParentBlockHash, parentBlockHash) && bytes.Equal(header.Message.ParentBlockRoot, parentBlockRoot) {
return header
}
}
}
return nil
}

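A brief usage sketch of this header cache under the semantics documented above; the slots, parent hashes, roots, and values are illustrative only.

package cache

import (
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// demoExecutionPayloadHeaders walks through saving and retrieving headers.
func demoExecutionPayloadHeaders() *enginev1.SignedExecutionPayloadHeader {
	c := NewExecutionPayloadHeaders()

	// Two competing headers for the same slot and parent: only the
	// higher-value one is kept for that parent hash and parent root pair.
	low := &enginev1.SignedExecutionPayloadHeader{
		Message: &enginev1.ExecutionPayloadHeaderEPBS{
			Slot:            2,
			ParentBlockHash: []byte("parent2"),
			ParentBlockRoot: []byte("root2"),
			Value:           100,
		},
	}
	high := &enginev1.SignedExecutionPayloadHeader{
		Message: &enginev1.ExecutionPayloadHeaderEPBS{
			Slot:            2,
			ParentBlockHash: []byte("parent2"),
			ParentBlockRoot: []byte("root2"),
			Value:           200,
		},
	}
	c.SaveSignedExecutionPayloadHeader(low)
	c.SaveSignedExecutionPayloadHeader(high)

	// Proposer-side lookup matches on slot, parent block hash, and parent block root;
	// under these assumptions it returns the 200-value header.
	return c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte("root2"))
}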
View File

@@ -0,0 +1,243 @@
package cache
import (
"testing"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func Test_SaveSignedExecutionPayloadHeader(t *testing.T) {
t.Run("First header should be added to cache", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
Value: 100,
},
}
c.SaveSignedExecutionPayloadHeader(header)
require.Equal(t, 1, len(c.headers))
require.Equal(t, header, c.headers[1][0])
})
t.Run("Second header with higher slot should be added, and both slots should be in cache", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header1 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
Value: 100,
},
}
header2 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent2"),
Value: 100,
},
}
c.SaveSignedExecutionPayloadHeader(header1)
c.SaveSignedExecutionPayloadHeader(header2)
require.Equal(t, 2, len(c.headers))
require.Equal(t, header1, c.headers[1][0])
require.Equal(t, header2, c.headers[2][0])
})
t.Run("Third header with higher slot should replace the oldest slot", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header1 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
Value: 100,
},
}
header2 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent2"),
Value: 100,
},
}
header3 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 3,
ParentBlockHash: []byte("parent3"),
Value: 100,
},
}
c.SaveSignedExecutionPayloadHeader(header1)
c.SaveSignedExecutionPayloadHeader(header2)
c.SaveSignedExecutionPayloadHeader(header3)
require.Equal(t, 2, len(c.headers))
require.Equal(t, header2, c.headers[2][0])
require.Equal(t, header3, c.headers[3][0])
})
t.Run("Header with same slot but higher value should replace the existing one", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header1 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent2"),
Value: 100,
},
}
header2 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent2"),
Value: 200,
},
}
c.SaveSignedExecutionPayloadHeader(header1)
c.SaveSignedExecutionPayloadHeader(header2)
require.Equal(t, 1, len(c.headers[2]))
require.Equal(t, header2, c.headers[2][0])
})
t.Run("Header with different parent block hash should be appended to the same slot", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header1 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent1"),
Value: 100,
},
}
header2 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent2"),
Value: 200,
},
}
c.SaveSignedExecutionPayloadHeader(header1)
c.SaveSignedExecutionPayloadHeader(header2)
require.Equal(t, 2, len(c.headers[2]))
require.Equal(t, header1, c.headers[2][0])
require.Equal(t, header2, c.headers[2][1])
})
}
func TestSignedExecutionPayloadHeader(t *testing.T) {
t.Run("Return header when slot and parentBlockHash match", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
ParentBlockRoot: []byte("root1"),
Value: 100,
},
}
c.SaveSignedExecutionPayloadHeader(header)
result := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte("root1"))
require.NotNil(t, result)
require.Equal(t, header, result)
})
t.Run("Return nil when no matching slot and parentBlockHash", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
ParentBlockRoot: []byte("root1"),
Value: 100,
},
}
c.SaveSignedExecutionPayloadHeader(header)
result := c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte("root1"))
require.IsNil(t, result)
})
t.Run("Return nil when no matching slot and parentBlockRoot", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
ParentBlockRoot: []byte("root1"),
Value: 100,
},
}
c.SaveSignedExecutionPayloadHeader(header)
result := c.SignedExecutionPayloadHeader(2, []byte("parent1"), []byte("root2"))
require.IsNil(t, result)
})
t.Run("Return header when there are two slots in the cache and a match is found", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header1 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
Value: 100,
},
}
header2 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent2"),
Value: 200,
},
}
c.SaveSignedExecutionPayloadHeader(header1)
c.SaveSignedExecutionPayloadHeader(header2)
// Check for the first header
result1 := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte{})
require.NotNil(t, result1)
require.Equal(t, header1, result1)
// Check for the second header
result2 := c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte{})
require.NotNil(t, result2)
require.Equal(t, header2, result2)
})
t.Run("Return nil when slot is evicted from cache", func(t *testing.T) {
c := NewExecutionPayloadHeaders()
header1 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 1,
ParentBlockHash: []byte("parent1"),
Value: 100,
},
}
header2 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 2,
ParentBlockHash: []byte("parent2"),
Value: 200,
},
}
header3 := &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
Slot: 3,
ParentBlockHash: []byte("parent3"),
Value: 300,
},
}
c.SaveSignedExecutionPayloadHeader(header1)
c.SaveSignedExecutionPayloadHeader(header2)
c.SaveSignedExecutionPayloadHeader(header3)
// The first slot should be evicted, so result should be nil
result := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte{})
require.IsNil(t, result)
// The second slot should still be present
result = c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte{})
require.NotNil(t, result)
require.Equal(t, header2, result)
// The third slot should be present
result = c.SignedExecutionPayloadHeader(3, []byte("parent3"), []byte{})
require.NotNil(t, result)
require.Equal(t, header3, result)
})
}
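
Taken together, these tests describe a cache keyed by slot: headers for the same slot accumulate in insertion order, lookups match on slot plus parent block hash and parent block root, and only the two most recent slots are retained. A minimal standalone sketch of that behaviour, using local stand-in types rather than the real protobuf headers (all names below are illustrative, not the actual cache API):

```go
package main

import "fmt"

// headerEntry is a stand-in for enginev1.SignedExecutionPayloadHeader, keeping only the
// fields the tests above exercise.
type headerEntry struct {
	Slot            uint64
	ParentBlockHash string
	ParentBlockRoot string
	Value           uint64
}

// headerCache models the behaviour exercised above: append per slot, evict all but the
// two most recent slots, and look up by (slot, parent hash, parent root).
type headerCache struct {
	headers map[uint64][]headerEntry
}

func newHeaderCache() *headerCache {
	return &headerCache{headers: make(map[uint64][]headerEntry)}
}

func (c *headerCache) save(h headerEntry) {
	c.headers[h.Slot] = append(c.headers[h.Slot], h)
	for s := range c.headers {
		if s+1 < h.Slot { // keep only the saved slot and the one before it
			delete(c.headers, s)
		}
	}
}

func (c *headerCache) get(slot uint64, parentHash, parentRoot string) *headerEntry {
	for i := range c.headers[slot] {
		h := c.headers[slot][i]
		if h.ParentBlockHash == parentHash && h.ParentBlockRoot == parentRoot {
			return &c.headers[slot][i]
		}
	}
	return nil
}

func main() {
	c := newHeaderCache()
	c.save(headerEntry{Slot: 1, ParentBlockHash: "parent1", Value: 100})
	c.save(headerEntry{Slot: 2, ParentBlockHash: "parent2", Value: 200})
	c.save(headerEntry{Slot: 3, ParentBlockHash: "parent3", Value: 300})
	fmt.Println(c.get(1, "parent1", "") == nil) // true: slot 1 was evicted
	fmt.Println(c.get(3, "parent3", "") != nil) // true: slot 3 is still present
}
```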

View File

@@ -11,7 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
)
var (
@@ -92,17 +92,17 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
delay *= delayFactor
delay = math.Min(delay, maxDelay)
}
span.AddAttributes(trace.BoolAttribute("inProgress", inProgress))
span.SetAttributes(trace.BoolAttribute("inProgress", inProgress))
item, exists := c.cache.Get(r)
if exists && item != nil {
skipSlotCacheHit.Inc()
span.AddAttributes(trace.BoolAttribute("hit", true))
span.SetAttributes(trace.BoolAttribute("hit", true))
return item.(state.BeaconState).Copy(), nil
}
skipSlotCacheMiss.Inc()
span.AddAttributes(trace.BoolAttribute("hit", false))
span.SetAttributes(trace.BoolAttribute("hit", false))
return nil, nil
}
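
Two things are visible in this hunk: the file moves from go.opencensus.io/trace to Prysm's monitoring/tracing/trace wrapper, with span.AddAttributes becoming span.SetAttributes, and the unchanged context shows the cache's wait loop, which backs off multiplicatively (capped at maxDelay) while another caller is still computing the state for the same root. A compact standalone sketch of that wait-and-recheck pattern (all names and constants below are illustrative, not the real SkipSlotCache API):

```go
package main

import (
	"math"
	"sync"
	"time"
)

// waitForState models the read path visible above: while the entry is marked in-progress,
// sleep, grow the delay by a constant factor, cap it, and re-check.
func waitForState(mu *sync.Mutex, inProgress *bool, cached *string) (string, bool) {
	delay := float64(10 * time.Millisecond)
	const delayFactor = 1.1
	maxDelay := float64(100 * time.Millisecond)

	for {
		mu.Lock()
		busy := *inProgress
		v := *cached
		mu.Unlock()
		if !busy {
			return v, v != "" // hit if a value was stored, miss otherwise
		}
		time.Sleep(time.Duration(delay))
		delay *= delayFactor
		delay = math.Min(delay, maxDelay)
	}
}

func main() {
	var mu sync.Mutex
	inProgress, cached := true, ""
	go func() {
		time.Sleep(30 * time.Millisecond)
		mu.Lock()
		cached, inProgress = "beacon state copy", false
		mu.Unlock()
	}()
	_, _ = waitForState(&mu, &inProgress, &cached)
}
```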

View File

@@ -34,13 +34,13 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -14,9 +14,9 @@ import (
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"go.opencensus.io/trace"
)
// ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation

View File

@@ -10,7 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/math"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
)
// AttDelta contains rewards and penalties for a single attestation.

View File

@@ -7,7 +7,7 @@ import (
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"go.opencensus.io/trace"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
)
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.

View File

@@ -40,6 +40,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -49,7 +50,6 @@ go_library(
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -107,6 +107,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//testing/util/random:go_default_library",
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

View File

@@ -14,10 +14,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"go.opencensus.io/trace"
)
// ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
@@ -110,25 +110,7 @@ func VerifyAttestationNoVerifySignature(
var indexedAtt ethpb.IndexedAtt
if att.Version() < version.Electra {
if uint64(att.GetData().CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
}
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
return errors.Wrap(err, "could not verify attestation bitfields")
}
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
} else {
if att.Version() >= version.Electra {
if att.GetData().CommitteeIndex != 0 {
return errors.New("committee index must be 0 post-Electra")
}
@@ -154,6 +136,29 @@ func VerifyAttestationNoVerifySignature(
if err != nil {
return err
}
} else {
if uint64(att.GetData().CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
}
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
if committee == nil {
return errors.New("no committee exist for this attestation")
}
if err := helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
return errors.Wrap(err, "failed to verify aggregation bitfield")
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
}
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
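
The reordered branches keep the same pre-Electra checks, now in the else arm: the committee index must be below the committee count, a committee must exist, and the aggregation bitfield must match the committee size before the attestation is converted to indexed form. A standalone sketch of those bounds checks, using plain slices in place of the beacon state and bitlist types (all names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// verifyPreElectraCommittee restates the pre-Electra checks from the else branch above
// with plain slices; the real code resolves the committee from the beacon state and uses
// go-bitfield for the aggregation bits.
func verifyPreElectraCommittee(committeeIndex, committeeCount uint64, committee []uint64, aggregationBits []bool) error {
	if committeeIndex >= committeeCount {
		return fmt.Errorf("committee index %d >= committee count %d", committeeIndex, committeeCount)
	}
	if len(committee) == 0 {
		return errors.New("no committee exists for this attestation")
	}
	if len(aggregationBits) != len(committee) {
		return fmt.Errorf("wanted aggregation bitfield length %d, got %d", len(committee), len(aggregationBits))
	}
	return nil
}

func main() {
	err := verifyPreElectraCommittee(2, 2, []uint64{5, 9}, []bool{true, false})
	fmt.Println(err) // committee index 2 >= committee count 2
}
```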

View File

@@ -1,3 +1,5 @@
package blocks
var ProcessBLSToExecutionChange = processBLSToExecutionChange
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -213,6 +214,47 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateEPBS:
kzgs := make([][]byte, 0)
kzgRoot, err := ssz.KzgCommitmentsRoot(kzgs)
if err != nil {
return nil, err
}
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockEpbs{
Block: &ethpb.BeaconBlockEpbs{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyEpbs{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
SignedExecutionPayloadHeader: &enginev1.SignedExecutionPayloadHeader{
Message: &enginev1.ExecutionPayloadHeaderEPBS{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
BlobKzgCommitmentsRoot: kzgRoot[:],
},
Signature: make([]byte, 96),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
PayloadAttestations: make([]*ethpb.PayloadAttestation, 0),
},
},
Signature: params.BeaconConfig().EmptySignature[:],

View File

@@ -2,11 +2,13 @@ package blocks
import (
"bytes"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -200,13 +202,13 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
var err error
if st.Version() >= version.Capella {
st, err = ProcessWithdrawals(st, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
payload, err := body.Execution()
if err != nil {
return nil, err
}
if err := verifyBlobCommitmentCount(body); err != nil {
return nil, err
}
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
return nil, err
@@ -220,79 +222,64 @@ func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (sta
return st, nil
}
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
// Skip validation if the state is not merge compatible.
complete, err := IsMergeTransitionComplete(st)
if err != nil {
return err
}
if !complete {
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
if body.Version() < version.Deneb {
return nil
}
// Validate current header's parent hash matches state header's block hash.
h, err := st.LatestExecutionPayloadHeader()
kzgs, err := body.BlobKzgCommitments()
if err != nil {
return err
}
if !bytes.Equal(header.ParentHash(), h.BlockHash()) {
return ErrInvalidPayloadBlockHash
if len(kzgs) > field_params.MaxBlobsPerBlock {
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
}
return nil
}
// ValidatePayloadHeader validates the payload header.
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
// Validate header's random mix matches with state in current epoch
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return err
}
if !bytes.Equal(header.PrevRandao(), random) {
return ErrInvalidPayloadPrevRandao
}
// Validate header's timestamp matches with state in current slot.
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
if err != nil {
return err
}
if header.Timestamp() != uint64(t.Unix()) {
return ErrInvalidPayloadTimeStamp
}
return nil
}
// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
var err error
if st.Version() >= version.Capella {
st, err = ProcessWithdrawals(st, header)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
return nil, err
}
if err := ValidatePayloadHeader(st, header); err != nil {
return nil, err
}
if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
return nil, err
}
return st, nil
}
// GetBlockPayloadHash returns the hash of the execution payload of the block
func GetBlockPayloadHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
var payloadHash [32]byte
if IsPreBellatrixVersion(blk.Version()) {
return payloadHash, nil
if blk.Version() >= version.EPBS {
header, err := blk.Body().SignedExecutionPayloadHeader()
if err != nil {
return payloadHash, err
}
payload, err := header.Header()
if err != nil {
return payloadHash, err
}
return payload.BlockHash(), nil
}
payload, err := blk.Body().Execution()
if err != nil {
return payloadHash, err
if blk.Version() >= version.Bellatrix {
payload, err := blk.Body().Execution()
if err != nil {
return payloadHash, err
}
return bytesutil.ToBytes32(payload.BlockHash()), nil
}
return bytesutil.ToBytes32(payload.BlockHash()), nil
return payloadHash, nil
}
// GetBlockParentHash returns the hash of the parent execution payload
func GetBlockParentHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
var parentHash [32]byte
if blk.Version() >= version.EPBS {
header, err := blk.Body().SignedExecutionPayloadHeader()
if err != nil {
return parentHash, err
}
payload, err := header.Header()
if err != nil {
return parentHash, err
}
return payload.ParentBlockHash(), nil
}
if blk.Version() >= version.Bellatrix {
payload, err := blk.Body().Execution()
if err != nil {
return parentHash, err
}
return bytesutil.ToBytes32(payload.ParentHash()), nil
}
return parentHash, nil
}
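
ProcessPayload now takes the whole block body (so it can count blob KZG commitments itself), and GetBlockPayloadHash / GetBlockParentHash gained an ePBS branch that reads the hash from the signed execution payload header instead of the full payload. A small standalone sketch of that newest-fork-first dispatch, with stand-in types and illustrative version constants in place of the real block interfaces:

```go
package main

import "fmt"

// Simplified fork-version ordering for the sketch (illustrative values only).
const (
	bellatrix = 3
	epbs      = 7
)

// blockInfo is a hypothetical stand-in for interfaces.ReadOnlyBeaconBlock, carrying just
// what the dispatch in GetBlockPayloadHash needs.
type blockInfo struct {
	version         int
	headerBlockHash [32]byte // ePBS: block hash from the signed execution payload header
	payloadHash     [32]byte // Bellatrix+: block hash from the full execution payload
}

// blockPayloadHash mirrors the new dispatch order above: newest fork first, falling
// through to a zero hash for pre-Bellatrix blocks that have no execution payload.
func blockPayloadHash(b blockInfo) [32]byte {
	if b.version >= epbs {
		return b.headerBlockHash
	}
	if b.version >= bellatrix {
		return b.payloadHash
	}
	return [32]byte{}
}

func main() {
	fmt.Printf("%x\n", blockPayloadHash(blockInfo{version: 0})) // zero hash pre-Bellatrix
}
```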

View File

@@ -1,6 +1,7 @@
package blocks_test
import (
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -13,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -581,14 +583,18 @@ func Test_ProcessPayload(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrappedPayload, err := consensusblocks.WrappedExecutionPayload(tt.payload)
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: tt.payload,
})
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, wrappedPayload)
st, err := blocks.ProcessPayload(st, body)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
want, err := consensusblocks.PayloadToHeader(wrappedPayload)
payload, err := body.Execution()
require.NoError(t, err)
want, err := consensusblocks.PayloadToHeader(payload)
require.Equal(t, tt.err, err)
h, err := st.LatestExecutionPayloadHeader()
require.NoError(t, err)
@@ -609,13 +615,15 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyCapella{
ExecutionPayload: payload,
})
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
_, err = blocks.ProcessPayload(st, body)
require.NoError(t, err)
}
func Test_ProcessPayloadHeader(t *testing.T) {
func Test_ProcessPayload_Blinded(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
@@ -663,7 +671,13 @@ func Test_ProcessPayloadHeader(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, err := blocks.ProcessPayloadHeader(st, tt.header)
p, ok := tt.header.Proto().(*enginev1.ExecutionPayloadHeader)
require.Equal(t, true, ok)
body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BlindedBeaconBlockBodyBellatrix{
ExecutionPayloadHeader: p,
})
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, body)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
@@ -728,7 +742,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err = blocks.ValidatePayloadHeader(st, tt.header)
err = blocks.ValidatePayload(st, tt.header)
require.Equal(t, tt.err, err)
})
}
@@ -785,7 +799,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
err = blocks.ValidatePayloadWhenMergeCompletes(tt.state, tt.header)
require.Equal(t, tt.err, err)
})
}
@@ -906,3 +920,15 @@ func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella {
Withdrawals: make([]*enginev1.Withdrawal, 0),
}
}
func TestVerifyBlobCommitmentCount(t *testing.T) {
b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
rb, err := consensusblocks.NewBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock+1)}}
rb, err = consensusblocks.NewBeaconBlock(b)
require.NoError(t, err)
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", fieldparams.MaxBlobsPerBlock+1), blocks.VerifyBlobCommitmentCount(rb.Body()))
}

View File

@@ -14,8 +14,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -115,15 +115,97 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
return val, nil
}
func checkWithdrawalsAgainstPayload(
executionData interfaces.ExecutionData,
numExpected int,
expectedRoot [32]byte,
) error {
var wdRoot [32]byte
if executionData.IsBlinded() {
r, err := executionData.WithdrawalsRoot()
if err != nil {
return errors.Wrap(err, "could not get withdrawals root")
}
copy(wdRoot[:], r)
} else {
wds, err := executionData.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals")
}
if len(wds) != numExpected {
return fmt.Errorf("execution payload header has %d withdrawals when %d were expected", len(wds), numExpected)
}
wdRoot, err = ssz.WithdrawalSliceRoot(wds, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return errors.Wrap(err, "could not get withdrawals root")
}
}
if expectedRoot != wdRoot {
return fmt.Errorf("expected withdrawals root %#x, got %#x", expectedRoot, wdRoot)
}
return nil
}
func processWithdrawalStateTransition(
st state.BeaconState,
expectedWithdrawals []*enginev1.Withdrawal,
partialWithdrawalsCount uint64,
) (err error) {
for _, withdrawal := range expectedWithdrawals {
err := helpers.DecreaseBalance(st, withdrawal.ValidatorIndex, withdrawal.Amount)
if err != nil {
return errors.Wrap(err, "could not decrease balance")
}
}
if st.Version() >= version.Electra {
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
return fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
}
}
if len(expectedWithdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(expectedWithdrawals[len(expectedWithdrawals)-1].Index + 1); err != nil {
return errors.Wrap(err, "could not set next withdrawal index")
}
}
var nextValidatorIndex primitives.ValidatorIndex
if uint64(len(expectedWithdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
if err != nil {
return errors.Wrap(err, "could not get next withdrawal validator index")
}
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
}
}
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
return errors.Wrap(err, "could not set next withdrawal validator index")
}
return nil
}
// ProcessWithdrawals processes the validator withdrawals from the provided execution payload
// into the beacon state.
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// if state.fork.current_version >= EIP7732_FORK_VERSION :
// if not is_parent_block_full(state): # [New in EIP-7732]
// return
//
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
//
// assert len(payload.withdrawals) == len(expected_withdrawals)
// if state.fork.current_version >= EIP7732_FORK_VERSION :
// state.latest_withdrawals_root = hash_tree_root(expected_withdrawals) # [New in EIP-7732]
// else :
// assert len(payload.withdrawals) == len(expected_withdrawals)
//
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
@@ -148,76 +230,39 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
if st.Version() >= version.EPBS {
isParentBlockFull, err := st.IsParentBlockFull()
if err != nil {
return nil, errors.Wrap(err, "could not check if parent block is full")
}
if !isParentBlockFull {
return nil, nil
}
}
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}
var wdRoot [32]byte
if executionData.IsBlinded() {
r, err := executionData.WithdrawalsRoot()
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals root")
}
wdRoot = bytesutil.ToBytes32(r)
} else {
wds, err := executionData.Withdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals")
}
if len(wds) != len(expectedWithdrawals) {
return nil, fmt.Errorf("execution payload header has %d withdrawals when %d were expected", len(wds), len(expectedWithdrawals))
}
wdRoot, err = ssz.WithdrawalSliceRoot(wds, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, errors.Wrap(err, "could not get withdrawals root")
}
}
expectedRoot, err := ssz.WithdrawalSliceRoot(expectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals root")
}
if expectedRoot != wdRoot {
return nil, fmt.Errorf("expected withdrawals root %#x, got %#x", expectedRoot, wdRoot)
}
for _, withdrawal := range expectedWithdrawals {
err := helpers.DecreaseBalance(st, withdrawal.ValidatorIndex, withdrawal.Amount)
if st.Version() >= version.EPBS {
err = st.SetLastWithdrawalsRoot(expectedRoot[:])
if err != nil {
return nil, errors.Wrap(err, "could not decrease balance")
return nil, errors.Wrap(err, "could not set withdrawals root")
}
}
if st.Version() >= version.Electra {
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
}
}
if len(expectedWithdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(expectedWithdrawals[len(expectedWithdrawals)-1].Index + 1); err != nil {
return nil, errors.Wrap(err, "could not set next withdrawal index")
}
}
var nextValidatorIndex primitives.ValidatorIndex
if uint64(len(expectedWithdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, errors.Wrap(err, "could not get next withdrawal validator index")
}
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
if err := checkWithdrawalsAgainstPayload(executionData, len(expectedWithdrawals), expectedRoot); err != nil {
return nil, err
}
}
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
return nil, errors.Wrap(err, "could not set next withdrawal validator index")
if err := processWithdrawalStateTransition(st, expectedWithdrawals, partialWithdrawalsCount); err != nil {
return nil, err
}
return st, nil
}
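
The refactor splits ProcessWithdrawals into three pieces: an ePBS early return when the parent block is not full, the comparison (or, under ePBS, recording) of the expected-withdrawals root handled by checkWithdrawalsAgainstPayload and SetLastWithdrawalsRoot, and the shared bookkeeping in processWithdrawalStateTransition. A condensed standalone sketch of that control flow, with plain stand-ins for the state and payload accessors (nothing below is the real Prysm API):

```go
package main

import (
	"errors"
	"fmt"
)

const epbs = 7 // illustrative fork-version constant

// withdrawalsInput bundles stand-ins for the state/payload accessors the real function uses.
type withdrawalsInput struct {
	version            int
	parentBlockFull    bool     // ePBS: is_parent_block_full(state)
	expected           []uint64 // expected withdrawal indices derived from the state
	payloadWithdrawals []uint64 // withdrawals carried by the (non-blinded) payload
}

// processWithdrawalsSketch condenses the post-refactor flow: ePBS skips the slot when the
// parent block is empty and records the expected-withdrawals root on the state; earlier
// forks compare the payload's withdrawals against the expected set; both then fall through
// to the shared bookkeeping (balance decreases, next-index updates).
func processWithdrawalsSketch(in withdrawalsInput) error {
	if in.version >= epbs && !in.parentBlockFull {
		return nil // nothing to withdraw this slot
	}
	if in.version >= epbs {
		// state.latest_withdrawals_root = hash_tree_root(expected_withdrawals)
	} else {
		if len(in.payloadWithdrawals) != len(in.expected) {
			return fmt.Errorf("payload has %d withdrawals, expected %d",
				len(in.payloadWithdrawals), len(in.expected))
		}
		for i := range in.expected {
			if in.payloadWithdrawals[i] != in.expected[i] {
				return errors.New("withdrawal mismatch")
			}
		}
	}
	// shared bookkeeping: decrease balances, dequeue partial withdrawals (Electra+),
	// advance next_withdrawal_index and next_withdrawal_validator_index.
	return nil
}

func main() {
	err := processWithdrawalsSketch(withdrawalsInput{version: epbs, parentBlockFull: false})
	fmt.Println(err) // <nil>: ePBS slot with an empty parent processes no withdrawals
}
```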

View File

@@ -22,6 +22,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util/random"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -1132,13 +1133,407 @@ func TestProcessWithdrawals(t *testing.T) {
checkPostState(t, test.Control, post)
}
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
})
}
})
}
}
func TestProcessWithdrawalsEPBS(t *testing.T) {
const (
currentEpoch = primitives.Epoch(10)
epochInFuture = primitives.Epoch(12)
epochInPast = primitives.Epoch(8)
numValidators = 128
notWithdrawableIndex = 127
notPartiallyWithdrawable = 126
maxSweep = uint64(80)
)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
type args struct {
Name string
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
FullWithdrawalIndices []primitives.ValidatorIndex
PendingPartialWithdrawalIndices []primitives.ValidatorIndex
Withdrawals []*enginev1.Withdrawal
PendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal // Electra
LatestBlockHash []byte // EIP-7732
}
type control struct {
NextWithdrawalValidatorIndex primitives.ValidatorIndex
NextWithdrawalIndex uint64
Balances map[uint64]uint64
}
type Test struct {
Args args
Control control
}
executionAddress := func(i primitives.ValidatorIndex) []byte {
wc := make([]byte, 20)
wc[19] = byte(i)
return wc
}
withdrawalAmount := func(i primitives.ValidatorIndex) uint64 {
return maxEffectiveBalance + uint64(i)*100000
}
fullWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
Index: idx,
ValidatorIndex: i,
Address: executionAddress(i),
Amount: withdrawalAmount(i),
}
}
PendingPartialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
return &enginev1.Withdrawal{
Index: idx,
ValidatorIndex: i,
Address: executionAddress(i),
Amount: withdrawalAmount(i) - maxEffectiveBalance,
}
}
tests := []Test{
{
Args: args{
Name: "success no withdrawals",
NextWithdrawalValidatorIndex: 10,
NextWithdrawalIndex: 3,
},
Control: control{
NextWithdrawalValidatorIndex: 90,
NextWithdrawalIndex: 3,
},
},
{
Args: args{
Name: "success one full withdrawal",
NextWithdrawalIndex: 3,
NextWithdrawalValidatorIndex: 5,
FullWithdrawalIndices: []primitives.ValidatorIndex{70},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(70, 3),
},
},
Control: control{
NextWithdrawalValidatorIndex: 85,
NextWithdrawalIndex: 4,
Balances: map[uint64]uint64{70: 0},
},
},
{
Args: args{
Name: "success one partial withdrawal",
NextWithdrawalIndex: 21,
NextWithdrawalValidatorIndex: 120,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 21),
},
},
Control: control{
NextWithdrawalValidatorIndex: 72,
NextWithdrawalIndex: 22,
Balances: map[uint64]uint64{7: maxEffectiveBalance},
},
},
{
Args: args{
Name: "success many full withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 25,
Balances: map[uint64]uint64{7: 0, 19: 0, 28: 0},
},
},
{
Args: args{
Name: "less than max sweep at end",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{80, 81, 82, 83},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(80, 22), fullWithdrawal(81, 23), fullWithdrawal(82, 24),
fullWithdrawal(83, 25),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 26,
Balances: map[uint64]uint64{80: 0, 81: 0, 82: 0, 83: 0},
},
},
{
Args: args{
Name: "less than max sweep and beginning",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{4, 5, 6},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(4, 22), fullWithdrawal(5, 23), fullWithdrawal(6, 24),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 25,
Balances: map[uint64]uint64{4: 0, 5: 0, 6: 0},
},
},
{
Args: args{
Name: "success many partial withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(7, 22), PendingPartialWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
},
},
Control: control{
NextWithdrawalValidatorIndex: 84,
NextWithdrawalIndex: 25,
Balances: map[uint64]uint64{
7: maxEffectiveBalance,
19: maxEffectiveBalance,
28: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "success many withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 88,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
fullWithdrawal(28, 28),
},
},
Control: control{
NextWithdrawalValidatorIndex: 40,
NextWithdrawalIndex: 29,
Balances: map[uint64]uint64{
7: 0, 19: 0, 28: 0,
2: maxEffectiveBalance, 1: maxEffectiveBalance, 89: maxEffectiveBalance,
15: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "success many withdrawals with pending partial withdrawals in state",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 88,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
fullWithdrawal(28, 28),
},
PendingPartialWithdrawals: []*ethpb.PendingPartialWithdrawal{
{
Index: 11,
Amount: withdrawalAmount(11) - maxEffectiveBalance,
},
},
},
Control: control{
NextWithdrawalValidatorIndex: 40,
NextWithdrawalIndex: 29,
Balances: map[uint64]uint64{
7: 0, 19: 0, 28: 0,
2: maxEffectiveBalance, 1: maxEffectiveBalance, 89: maxEffectiveBalance,
15: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "success more than max fully withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
FullWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(1, 22), fullWithdrawal(2, 23), fullWithdrawal(3, 24),
fullWithdrawal(4, 25), fullWithdrawal(5, 26), fullWithdrawal(6, 27),
fullWithdrawal(7, 28), fullWithdrawal(8, 29), fullWithdrawal(9, 30),
fullWithdrawal(21, 31), fullWithdrawal(22, 32), fullWithdrawal(23, 33),
fullWithdrawal(24, 34), fullWithdrawal(25, 35), fullWithdrawal(26, 36),
fullWithdrawal(27, 37),
},
},
Control: control{
NextWithdrawalValidatorIndex: 28,
NextWithdrawalIndex: 38,
Balances: map[uint64]uint64{
1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,
21: 0, 22: 0, 23: 0, 24: 0, 25: 0, 26: 0, 27: 0,
},
},
},
{
Args: args{
Name: "success more than max partially withdrawals",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 0,
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
Withdrawals: []*enginev1.Withdrawal{
PendingPartialWithdrawal(1, 22), PendingPartialWithdrawal(2, 23), PendingPartialWithdrawal(3, 24),
PendingPartialWithdrawal(4, 25), PendingPartialWithdrawal(5, 26), PendingPartialWithdrawal(6, 27),
PendingPartialWithdrawal(7, 28), PendingPartialWithdrawal(8, 29), PendingPartialWithdrawal(9, 30),
PendingPartialWithdrawal(21, 31), PendingPartialWithdrawal(22, 32), PendingPartialWithdrawal(23, 33),
PendingPartialWithdrawal(24, 34), PendingPartialWithdrawal(25, 35), PendingPartialWithdrawal(26, 36),
PendingPartialWithdrawal(27, 37),
},
},
Control: control{
NextWithdrawalValidatorIndex: 28,
NextWithdrawalIndex: 38,
Balances: map[uint64]uint64{
1: maxEffectiveBalance,
2: maxEffectiveBalance,
3: maxEffectiveBalance,
4: maxEffectiveBalance,
5: maxEffectiveBalance,
6: maxEffectiveBalance,
7: maxEffectiveBalance,
8: maxEffectiveBalance,
9: maxEffectiveBalance,
21: maxEffectiveBalance,
22: maxEffectiveBalance,
23: maxEffectiveBalance,
24: maxEffectiveBalance,
25: maxEffectiveBalance,
26: maxEffectiveBalance,
27: maxEffectiveBalance,
},
},
},
{
Args: args{
Name: "Parent Node is not full",
NextWithdrawalIndex: 22,
NextWithdrawalValidatorIndex: 4,
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
Withdrawals: []*enginev1.Withdrawal{
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
},
LatestBlockHash: []byte{1, 2, 3},
},
},
}
checkPostState := func(t *testing.T, expected control, st state.BeaconState) {
l, err := st.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalValidatorIndex, l)
n, err := st.NextWithdrawalIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalIndex, n)
balances := st.Balances()
for idx, bal := range expected.Balances {
require.Equal(t, bal, balances[idx])
}
}
prepareValidators := func(st state.BeaconState, arguments args) error {
validators := make([]*ethpb.Validator, numValidators)
if err := st.SetBalances(make([]uint64, numValidators)); err != nil {
return err
}
for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = epochInFuture
v.WithdrawalCredentials = make([]byte, 32)
v.WithdrawalCredentials[31] = byte(i)
if err := st.UpdateBalancesAtIndex(primitives.ValidatorIndex(i), v.EffectiveBalance-uint64(rand.Intn(1000))); err != nil {
return err
}
validators[i] = v
}
for _, idx := range arguments.FullWithdrawalIndices {
if idx != notWithdrawableIndex {
validators[idx].WithdrawableEpoch = epochInPast
}
if err := st.UpdateBalancesAtIndex(idx, withdrawalAmount(idx)); err != nil {
return err
}
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
}
for _, idx := range arguments.PendingPartialWithdrawalIndices {
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
if err := st.UpdateBalancesAtIndex(idx, withdrawalAmount(idx)); err != nil {
return err
}
}
return st.SetValidators(validators)
}
for _, test := range tests {
t.Run(test.Args.Name, func(t *testing.T) {
saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = maxSweep
if test.Args.Withdrawals == nil {
test.Args.Withdrawals = make([]*enginev1.Withdrawal, 0)
}
if test.Args.FullWithdrawalIndices == nil {
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
if test.Args.PendingPartialWithdrawalIndices == nil {
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
}
slot, err := slots.EpochStart(currentEpoch)
require.NoError(t, err)
var st state.BeaconState
var p interfaces.ExecutionData
spb := &ethpb.BeaconStateEPBS{
Slot: slot,
NextWithdrawalValidatorIndex: test.Args.NextWithdrawalValidatorIndex,
NextWithdrawalIndex: test.Args.NextWithdrawalIndex,
PendingPartialWithdrawals: test.Args.PendingPartialWithdrawals,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderEPBS{
BlockHash: []byte{},
},
LatestBlockHash: test.Args.LatestBlockHash,
}
st, err = state_native.InitializeFromProtoUnsafeEpbs(spb)
require.NoError(t, err)
env := random.ExecutionPayloadEnvelope(t)
env.Payload.Withdrawals = test.Args.Withdrawals
wp, err := consensusblocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
p, err = wp.Execution()
require.NoError(t, err)
err = prepareValidators(st, test.Args)
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Args.Name == "Parent Node is not full" {
require.IsNil(t, post)
require.IsNil(t, err)
} else {
require.NoError(t, err)
checkPostState(t, test.Control, post)
}
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
})
}
}
func TestProcessBLSToExecutionChanges(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{

View File

@@ -34,13 +34,13 @@ go_library(
"//contracts/deposit:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)

View File

@@ -11,10 +11,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
@@ -49,7 +49,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
return errors.New("nil state")
}
currentEpoch := slots.ToEpoch(st.Slot())
nextEpoch := slots.ToEpoch(st.Slot()) + 1
var nextPendingConsolidation uint64
pendingConsolidations, err := st.PendingConsolidations()
@@ -66,7 +66,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
nextPendingConsolidation++
continue
}
if sourceValidator.WithdrawableEpoch > currentEpoch {
if sourceValidator.WithdrawableEpoch > nextEpoch {
break
}

View File

@@ -13,12 +13,12 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/contracts/deposit"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
// ProcessDeposits is one of the operations performed on each processed
@@ -248,7 +248,7 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
// constants
ffe := params.BeaconConfig().FarFutureEpoch
curEpoch := slots.ToEpoch(st.Slot())
nextEpoch := slots.ToEpoch(st.Slot()) + 1
for _, balanceDeposit := range deposits {
v, err := st.ValidatorAtIndexReadOnly(balanceDeposit.Index)
@@ -259,7 +259,7 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
// If the validator is currently exiting, postpone the deposit until after the withdrawable
// epoch.
if v.ExitEpoch() < ffe {
if curEpoch <= v.WithdrawableEpoch() {
if nextEpoch <= v.WithdrawableEpoch() {
depositsToPostpone = append(depositsToPostpone, balanceDeposit)
} else {
// The deposited balance will never become active. Therefore, we increase the balance but do
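
Both this hunk and the pending-consolidations hunk above shift an epoch comparison from the current epoch to the next epoch (current + 1), so each path acts one epoch earlier than before: a deposit for a validator whose withdrawable epoch equals the current epoch is no longer postponed, and a consolidation whose source becomes withdrawable at the next epoch no longer blocks the queue. A tiny standalone restatement of the two updated predicates (hypothetical helper names, plain integers for epochs):

```go
package main

import "fmt"

// shouldPostponeDeposit and canProcessConsolidation restate the two updated conditions
// with plain integers; names are illustrative, not Prysm APIs.
func shouldPostponeDeposit(currentEpoch, withdrawableEpoch uint64) bool {
	nextEpoch := currentEpoch + 1
	return nextEpoch <= withdrawableEpoch // was: currentEpoch <= withdrawableEpoch
}

func canProcessConsolidation(currentEpoch, withdrawableEpoch uint64) bool {
	nextEpoch := currentEpoch + 1
	return withdrawableEpoch <= nextEpoch // was: withdrawableEpoch <= currentEpoch
}

func main() {
	// A validator withdrawable at the current epoch is no longer postponed,
	// and one withdrawable at current+1 no longer blocks the consolidation queue.
	fmt.Println(shouldPostponeDeposit(10, 10))   // false (previously true)
	fmt.Println(canProcessConsolidation(10, 11)) // true  (previously false)
}
```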

View File

@@ -30,21 +30,21 @@ import (
// or validator.effective_balance + UPWARD_THRESHOLD < balance
// ):
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
bals := state.Balances()
bals := st.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
if val == nil {
return false, nil, fmt.Errorf("validator %d is nil in state", idx)
return nil, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return false, nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(st.Balances()))
}
balance := bals[idx]
@@ -53,13 +53,13 @@ func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
}
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
val.EffectiveBalance = effectiveBal
return true, val, nil
newVal = val.Copy()
newVal.EffectiveBalance = effectiveBal
}
return false, val, nil
return newVal, nil
}
return state.ApplyToEveryValidator(validatorFunc)
return st.ApplyToEveryValidator(validatorFunc)
}
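
The validatorFunc rewrite switches ApplyToEveryValidator to a copy-on-write contract: the callback receives a read-only validator and returns either nil (unchanged) or a modified copy, instead of mutating the value in place and returning a changed flag. A minimal standalone sketch of that contract (simplified stand-in types; the real callback works with state.ReadOnlyValidator and *ethpb.Validator):

```go
package main

import "fmt"

// roValidator is a hypothetical stand-in for state.ReadOnlyValidator: the callback can
// read fields and take a copy, but cannot mutate the stored validator in place.
type roValidator struct{ effectiveBalance uint64 }

func (v roValidator) EffectiveBalance() uint64 { return v.effectiveBalance }
func (v roValidator) Copy() *validator         { return &validator{EffectiveBalance: v.effectiveBalance} }

type validator struct{ EffectiveBalance uint64 }

// applyToEveryValidator mirrors the new callback contract: a nil return means "unchanged",
// a non-nil return replaces the validator at that index.
func applyToEveryValidator(vals []roValidator, f func(idx int, val roValidator) (*validator, error)) ([]validator, error) {
	out := make([]validator, len(vals))
	for i, v := range vals {
		out[i] = validator{EffectiveBalance: v.effectiveBalance}
		newVal, err := f(i, v)
		if err != nil {
			return nil, err
		}
		if newVal != nil {
			out[i] = *newVal // copy-on-write only when the callback changed something
		}
	}
	return out, nil
}

func main() {
	vals := []roValidator{{effectiveBalance: 32_000_000_000}, {effectiveBalance: 31_000_000_000}}
	updated, _ := applyToEveryValidator(vals, func(idx int, val roValidator) (*validator, error) {
		if val.EffectiveBalance() < 32_000_000_000 {
			nv := val.Copy()
			nv.EffectiveBalance = 32_000_000_000
			return nv, nil // changed: return a copy
		}
		return nil, nil // unchanged: no copy, no write
	})
	fmt.Println(updated)
}
```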

Some files were not shown because too many files have changed in this diff.