Compare commits


147 Commits

Author SHA1 Message Date
Kasey Kirkham
c961e262c5 backfill service 2023-09-06 16:13:13 -05:00
kasey
a81518ca91 wrap ctx.Err, not the random last value of err (#12859)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-09-06 15:12:09 -05:00
Preston Van Loon
2b4c83b739 db: Use blobRotatingKey alias for readability (#12855) 2023-09-06 15:45:14 +00:00
terencechain
e3527e1e48 db: fix save blob sidecar using wrong prefix (#12849)
* db: fix save blob sidecar using wrong prefix

* tests: add more

* tests: check blobs no longer there

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-09-06 15:25:26 +00:00
terencechain
2e3b013ae7 fix(sync/rpc): use blobs by root when requesting block (#12837)
* fix(sync/rpc): use blobs by root when requesting block

Fix: use blobs by root for pending block queue (option B)

* fix: skip htr if block is older than deneb
2023-09-06 13:52:42 +00:00
Radosław Kapka
34f507f4b2 HTTP implementation of /eth/v1/beacon/pool/sync_committees (#12782)
* impl

* protos

* tests

* register endpoint

* test fix

* test fix

* remove path

* more test fixes

* cleanup

* bzl

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-09-01 17:58:50 +00:00
terencechain
3c98c27a32 chore(engine): remove debug log (#12836)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-09-01 13:22:05 +00:00
james-prysm
9a7393a2e3 Deneb produce blockv3 (#12708)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-09-01 12:51:27 +02:00
ICHINOSE Shogo
3a8be9fcf8 fix fuzz testing (#12790)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-31 21:08:27 +02:00
Preston Van Loon
a3212a5428 config/params: Require placeholderFields to be sorted and distinct (#12834)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-31 18:06:45 +00:00
james-prysm
bfecbf97db REST implementation of /eth/v1/validator/prepare_beacon_proposer (#12781)
* implementing prepare beacon proposer endpoint in full rest

* fixing type naming on validator client

* fixing linting

* Update beacon-chain/rpc/eth/shared/structs.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/eth/validator/handlers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/eth/validator/handlers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing feedback

* fixing unit tests

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-31 17:02:39 +00:00
terencechain
d08e85b818 chore: improve att error (#12813) 2023-08-31 08:41:57 -05:00
terencechain
3d74ecb6d4 fix: add back non parallel proposer (#12811) 2023-08-31 08:41:57 -05:00
kasey
d0ae692f4c give deneb its own field count config value (#12806)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
terencechain
b335eba625 fix: pbv2 condition (#12812) 2023-08-31 08:41:57 -05:00
terencechain
cd340cb0c8 fix: set current and prev epoch participations (#12814) 2023-08-31 08:41:57 -05:00
kasey
34b995d0de only initialize kzg if it will be used (#12803)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
kasey
5cf7b0458f re-use metadata bucket, group db constants (#12804)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
kasey
65aaf4878c Nishant's feedback, consistent tmp var (#12805)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
kasey
c32fe2e71b Save blob last stage init sync (#12764)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2023-08-31 08:41:57 -05:00
kasey
7068f97245 binary search is the wrong tool for the job, derp (#12757)
* binary search is the wrong tool for the job, derp

* restore protection from array index panics

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
Potuz
3a1b4a19c2 Blob channel (#12753)
* Add a new blob channel

* fix mock

* reset the channel

* keep a map of channels

* gazelle

* do not overwrite map

* remove pre-declaration
2023-08-31 08:41:57 -05:00
terencechain
7ed2a731d3 test: skip deneb e2e test with tracking (#12751)
* test: skip deneb e2e test with an issue

* Unskip more stuff
2023-08-31 08:41:57 -05:00
terencechain
f5d5a506ff fix: validator returning the right # of blobs (#12738) 2023-08-31 08:41:57 -05:00
terencechain
f49b2b9657 Update gossip_scoring_params.go (#12732) 2023-08-31 08:41:57 -05:00
terencechain
5179305071 feat: save blobs to db for subscriber (#12734) 2023-08-31 08:41:57 -05:00
james-prysm
726601221e fixing route handler and small cleanup items (#12726) 2023-08-31 08:41:57 -05:00
terencechain
83aaf043e4 Fix: payload attribute v3 return nil (#12736) 2023-08-31 08:41:57 -05:00
terencechain
96a9a6fc16 feat: add validator blob signing (#12730) 2023-08-31 08:41:57 -05:00
terencechain
6314f7fcbf feat: log blob commitment count (#12723) 2023-08-31 08:41:57 -05:00
terencechain
d210a2368f feat: implement eip4788 (#12570) 2023-08-31 08:41:57 -05:00
terencechain
708aee0fcb test:beta.1 deneb tests (#12680) 2023-08-31 08:41:57 -05:00
Potuz
57b8da08d1 Validate blobs feature (#12574) 2023-08-31 08:41:57 -05:00
james-prysm
70997e2489 renaming data to blobs in execution payload (#12664) 2023-08-31 08:41:57 -05:00
james-prysm
f602a44bee implementing publishblockv2 for deneb (#12662)
fixing broken test (#12673)
2023-08-31 08:41:57 -05:00
terencechain
6402301871 Test: add deneb spec test version beta.0 (#12610) 2023-08-31 08:41:57 -05:00
kasey
3476112d63 sync blobs in initial-sync (#12522) 2023-08-31 08:41:57 -05:00
Preston Van Loon
48f1f69695 Deneb: Implement EIP-7044 (#12577) 2023-08-31 08:41:57 -05:00
terencechain
f3ca945bb4 feat: add builder override (#12601) 2023-08-31 08:41:57 -05:00
terencechain
2bc91d20f9 feat: EIP7045 increase max att inclusion slot (#12565) 2023-08-31 08:41:57 -05:00
kasey
30ccff4465 support deneb in fullPayloadFromPayloadBody (#12649)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
james-prysm
4c1f398a96 adding ssz handling to produce block endpoint for deneb (#12632) 2023-08-31 08:41:57 -05:00
james-prysm
5a62aa4c07 adding ssz capabilities for deneb publish block v1 (#12622) 2023-08-31 08:41:57 -05:00
terencechain
b8fb602fc4 fix: use hexutil bytes for blobs bundle (#12623) 2023-08-31 08:41:57 -05:00
terencechain
a40640c9b9 fix: add blob batch flags to main/usage (#12614) 2023-08-31 08:41:57 -05:00
james-prysm
a41c01cb5e migrating get blob from eip4844 and deneb-integration (#12603) 2023-08-31 08:41:57 -05:00
james-prysm
6050501eb1 migrating Beacon API code from eip4844 to deneb-integration (#12568) 2023-08-31 08:41:57 -05:00
terencechain
590309e3fc feat(deneb): proposer rpc to handle builder flow (#12554) 2023-08-31 08:41:57 -05:00
terencechain
03fb7ed20c fix: devnet6 interop issues (#12545) 2023-08-31 08:41:57 -05:00
terencechain
95106a7533 feat(prysmctl): request blobs (#12544) 2023-08-31 08:41:57 -05:00
kasey
642988f977 ssz detection support for deneb (#12537)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
kasey
2121d1f4f4 Integrate generate genesis (#12523)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
kasey
87cd96afd3 BlobSidecarsByRoot (#12420)
* BlobSidecarsByRoot RPC handler

* BlobSidecarsByRange rpc handler (#12499)

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-31 08:41:57 -05:00
james-prysm
8a67f616f0 introducing deneb changes and blobs to builder (#12477) 2023-08-31 08:41:57 -05:00
terencechain
bcdf1c92a7 feat(config): update max blobs per block to 6 (#12512) 2023-08-31 08:41:57 -05:00
terencechain
ff6b670d95 feat(blockchain): update payload attribute for deneb (#12509) 2023-08-31 08:41:57 -05:00
terencechain
2be87f623b feat(rpc/validator): stream and use deneb block (#12510) 2023-08-31 08:41:57 -05:00
terencechain
5b1634b335 Update ProposeBeaconBlock Prysm RPC for Deneb (Non builder) (#12495) 2023-08-31 08:41:57 -05:00
terencechain
2dadf36fa9 Add data gas used field to execution payload (#12488) 2023-08-31 08:41:57 -05:00
terencechain
c10e56513d Proposer RPC: GetBlock for Deneb (#12481) 2023-08-31 08:41:57 -05:00
terencechain
7670fe7d6f Update block's commitment size (#12470) 2023-08-31 08:41:57 -05:00
terencechain
5deb1f755b Deneb: implements engine API end points (#12384) 2023-08-31 08:41:57 -05:00
james-prysm
e2750646fa validator signing deneb (#12449)
* validator signing feature migrated from eip4844 branch, added unit tests

* Update proto/prysm/v1alpha1/beacon_block.proto

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-31 08:41:57 -05:00
terencechain
e0c69396ed Add Deneb upgrade function (#12433) 2023-08-31 08:41:57 -05:00
terencechain
c03571138c P2P: broadcast blob (#12419) 2023-08-31 08:41:57 -05:00
terencechain
e3ac8b7745 Add Blob Gossip (#12413) 2023-08-31 08:41:57 -05:00
Radosław Kapka
319daa7f6c Deneb integration: Beacon API getStateV2 (#12424)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Fix Migration Of State (#12423)
2023-08-31 08:41:57 -05:00
terencechain
43e8ae4dca Deneb DB methods (#12379) 2023-08-31 08:41:57 -05:00
terencechain
61283e51e1 Add Deneb state (#12375) 2023-08-31 08:41:57 -05:00
terencechain
8e91df9155 Add deneb block to consensus types (#12368) 2023-08-31 08:41:57 -05:00
terencechain
bd86c7c8f3 Add deneb protobufs (#12363) 2023-08-31 08:41:57 -05:00
Raul Jordan
7bc44d9cfe Include Validation Error in ForkchoiceUpdatedResponse (#12828)
* include validation error in engine resp

* add to new payload
2023-08-29 11:28:44 -07:00
Preston Van Loon
63126fd51f Holesky support (#12821)
* Add holesky config


Copy config/params from deneb-integration

* Add --holesky flag

* Handle no genesis block error and suggest to user a workaround
2023-08-29 14:27:50 +00:00
Delweng
0ea0afb494 beacon-chain/execution: use pointer instead of value to reduce copy (#12818)
* beacon-chain/execution: use pointer instead of value to reduce value copy

Signed-off-by: jsvisa <delweng@gmail.com>

* beacon-chain/execution: fix unit test

Signed-off-by: jsvisa <delweng@gmail.com>

---------

Signed-off-by: jsvisa <delweng@gmail.com>
2023-08-29 13:24:54 +00:00
m3diumrare
286f6a9931 Minor typo in CLI flag (#12808)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-25 21:14:08 +00:00
Dhruv Bodani
4ef29a24e4 Add REST implementation of node version endpoint (#12809) 2023-08-25 13:14:08 +00:00
Dhruv Bodani
30eaddf48f Add support for validator count endpoint (#12752) 2023-08-24 23:11:41 +02:00
Nishant Das
bcf728b9ff Fix Sepolia Version (#12792) 2023-08-24 10:07:41 +00:00
terencechain
6f6c06a95b chore: remove unused proto import (#12787) 2023-08-24 01:09:06 +00:00
Preston Van Loon
5433502055 Add deneb config/params from deneb-integration (#12783)
Co-authored-by: Terence Tsao <terence@prysmaticlabs.com>
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-08-23 20:07:25 +00:00
Radosław Kapka
f0d54254ed HTTP implementation of voluntary exit pool endpoints (#12777)
* impl

* protos

* tests

* review

* test fix
2023-08-23 07:51:03 +00:00
james-prysm
da244c9e9a adding register validator route (#12776) 2023-08-22 15:44:00 +00:00
Sammy Rosso
e49f1321b7 HTTP Beacon API: /eth/v1/beacon/blocks/{block_id}/root (#12716)
* Initial setup

* Fix all tests and handler func

* Cleanup

* Fix the tests

* Remove middleware endpoint

* Add endpoint

* Switch query param to path

* Fix e2e test

* Update beacon-chain/rpc/eth/beacon/handlers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/eth/beacon/handlers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Radek & James Reviews

* Check blockId length

* Add length check

* Fix

* Revert "Fix e2e test"

This reverts commit 6289c1de5c.

* use v1alpha1 server to get root

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-22 06:18:02 +00:00
Raul Jordan
5eb8b88073 Update Fuzz List Timeout Github Workflow (#12768) 2023-08-22 04:48:13 +00:00
james-prysm
b0fc368185 local-block-value-boost flag from float64 to uint64 [BREAKING CHANGE] (#12765) 2023-08-21 22:34:34 +00:00
Raul Jordan
777ca06b78 Add Github Go Fuzzing Workflow With Cron Schedule (#12756)
* add in github workflow for fuzzing that runs with cron

* every day

* go version

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-21 16:09:51 +00:00
james-prysm
4d520460e0 REST implementation of /eth/v1/validator/register_validator (#12758)
* migrating v2 to http native url

* removing old code

* gaz

* updating beacon api validator client code

* fixing tests
2023-08-21 15:42:43 +00:00
Potuz
27d8b1c358 add more descriptive log on FFG-LMD consistency (#12763)
* add more descriptive log on FFG-LMD consistency

* Update beacon-chain/blockchain/receive_attestation.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* Update beacon-chain/blockchain/receive_attestation.go

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-21 14:06:59 +00:00
terencechain
17500dcfca Fix: download genesis log (#12759) 2023-08-20 14:46:40 -07:00
Radosław Kapka
0452fd02e8 HTTP Beacon API: /pool/attestations (#12735)
* attestations

* post

* tests

* Revert "Auxiliary commit to revert individual files from afede4d949a7519902be2f1e0c485306c4ccdea7"

This reverts commit 9de74879e0c41e43183da2fa7e63094cac030abe.

* remove test

* remove redundant return

* Update beacon-chain/rpc/eth/beacon/handlers_pool.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* review

* include index in broadcast log

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-18 14:29:40 +00:00
terencechain
09d761e1ab fix(proposer): verify attestations without mutating state (#12704)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-17 16:37:19 +00:00
Potuz
9a741c52d1 Remove quadratic loops when exiting (#12737)
* Remove quadratic loops when exiting

* unit test

* fix tests

* lint

* duplicated imports

* handle new error

* revert spurious unit test

* radek's review

* fix name

* use errors.Is

* fix error check

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-08-17 15:38:46 +00:00
Sammy Rosso
baed8da9c0 HTTP Beacon API: /eth/v1/validator/sync_committee_contribution (#12698)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-08-17 12:01:24 +02:00
Matthew Obwaya
33410a0ec1 Remove uses of deprecated go_embed rules_go (#12719)
* Remove prysm_web_ui http_archive from WORKSPACE

* Remove go_embed_data_dependencies() from WORKSPACE

* Remove go_embed_data target from validator/web/BUILD.bazel

* Remove # gazelle:ignore site_data.go annotation from validator/web/BUILD.bazel

* Run bazel run //:gazelle

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-08-16 15:47:09 +00:00
Nishant Das
cda1797d4e Remove Alpine Images From Prysm (#12749) 2023-08-16 14:01:09 +00:00
Potuz
e7f6048b8c Use last optimistic status on batches (#12741)
* Use last optimistic status on batches

* more descriptive errors

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-16 01:40:36 +00:00
Potuz
418959565f set optimistic status in head at init sync (#12748) 2023-08-16 00:47:56 +00:00
Raul Jordan
8229f3eb84 Prysm Web UI Release v2.0.4 (#12746)
Co-authored-by: james-prysm <james-prysm@users.noreply.github.com>
2023-08-15 21:11:44 +00:00
Nishant Das
4098d098aa Shift Error Logs To Debug (#12739) 2023-08-15 14:44:54 +00:00
anukul
46c72798c7 HTTP Beacon API: /eth/v1/validator/attestation_data (#12634)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-14 17:56:36 +03:00
Preston Van Loon
a5474200de Update geth to 1.12.2 (#12731) 2023-08-14 17:48:11 +08:00
Preston Van Loon
a85b4445fc Update bazel to 6.3.2 (#12725)
* Update bazel version to v6.3.2

* Disable legacy external runfile generation for CI

* Revert "Disable legacy external runfile generation for CI"

This reverts commit 1911f293d6.

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-11 19:08:33 +00:00
Preston Van Loon
751dd847b8 Update rules go & gazelle (#12721)
* Update rules_go to v0.40.0

* Update gazelle to v0.26.0

* Update gazelle to v0.27.0

* Update gazelle to v0.28.0

* Update gazelle to v0.29.0

* Update gazelle to v0.30.0

* Update gazelle to v0.31.0
2023-08-11 16:39:35 +00:00
Preston Van Loon
aeb7a45864 Update geth to 1.12.1 (#12718)
* Update geth to 1.12.1

* Fix //cmd/validator/flags:go_default_test

* fix //beacon-chain/node:go_default_test

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-08-11 10:45:42 +00:00
Radosław Kapka
e952fd802b HTTP Beacon API: /eth/v1/validator/beacon_committee_subscriptions (#12700)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

* in progress

* implementation

* remove test file

* remove duplicate

* tests

* HTTP Beacon API: `/eth/v1/validator/sync_committee_subscriptions`

* pointers, pointers everywhere

* fix

* fix after merge

* implementation

* tests

* test fixes

* review

* clear cache in validator tests

* add missing stuff

* compilation fix

* remove `required` from bool

* remove time fetcher from tests

* no need to convert

* add conversion

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-10 18:00:19 +00:00
Nishant Das
b511eef848 Update BLST to v0.3.11 (#12717)
* add changes

* Remove server.c exclusion in headers

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-08-10 22:51:19 +08:00
dependabot[bot]
7aa043892b Bump github.com/libp2p/go-libp2p from 0.27.5 to 0.27.8 (#12709)
* Bump github.com/libp2p/go-libp2p from 0.27.5 to 0.27.8

Bumps [github.com/libp2p/go-libp2p](https://github.com/libp2p/go-libp2p) from 0.27.5 to 0.27.8.
- [Release notes](https://github.com/libp2p/go-libp2p/releases)
- [Changelog](https://github.com/libp2p/go-libp2p/blob/master/CHANGELOG.md)
- [Commits](https://github.com/libp2p/go-libp2p/compare/v0.27.5...v0.27.8)

---
updated-dependencies:
- dependency-name: github.com/libp2p/go-libp2p
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

* gazelle and remove libp2p patch. The patch is merged upstream as of this version

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-08-09 17:24:07 +00:00
Radosław Kapka
36be057a11 HTTP Beacon API: /eth/v1/node/syncing (#12706)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-09 17:23:59 +02:00
Radosław Kapka
049e608c75 HTTP Beacon API: /eth/v1/validator/sync_committee_subscriptions (#12689)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

* in progress

* implementation

* remove test file

* remove duplicate

* tests

* HTTP Beacon API: `/eth/v1/validator/sync_committee_subscriptions`

* pointers, pointers everywhere

* fix

* fix after merge

* review

* James' review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-08 22:54:04 +00:00
Preston Van Loon
4541598850 Update go to 1.20.7 (#12707) 2023-08-08 21:39:22 +00:00
Sammy Rosso
8c08854dd0 Fix prysmctl writing empty JSON/YAML files (#12599)
* Setup

* add in marshalable

* Cleanup

* Raul' review

* Remove yaml Marshaler

* minimal fixes

* same imports

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-08 15:20:41 +00:00
m3diumrare
d2ff995eb2 Fix reported effective balance for unknown/pending validators (#12693)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-07 09:19:02 -05:00
anukul
56a0315dde fix: set CoreService in beaconv1alpha1.Server (#12702) 2023-08-06 18:16:54 -07:00
anukul
634133fedc use struct in beacon-chain/rpc/core to store dependencies (#12701) 2023-08-05 22:54:12 +02:00
terencechain
c1c1b7ecfa Feat: aggregate parallel default (#12699) 2023-08-04 16:03:10 +00:00
Nishant Das
9a4670ec64 add changes (#12697)
Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-08-04 22:05:47 +08:00
Radosław Kapka
a664a07303 Multi Value Slice (#12616)
* multi value slice

* extract helper function

* comments

* setup godoc fix

* value benchmarks

* use guid

* fix bug when deleting items

* remove callback and rename MultiValue

* godoc

* tiny change

* Nishant's review

* typos

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-08-04 12:42:54 +00:00
Potuz
dd14d5cef0 refactor slot tickers with intervals (#12440)
* refactor slot tickers with intervals

* GoDoc

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-03 20:55:16 -03:00
Radosław Kapka
3a09405bb7 HTTP Beacon API: /eth/v1/validator/aggregate_and_proofs (#12686)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

* in progress

* implementation

* remove test file

* remove duplicate

* tests

* review

* test fixes

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-03 22:24:23 +00:00
terencechain
cb59081887 Remove: span for convert to indexed attestation (#12687)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-03 16:17:46 +00:00
Nishant Das
a820d4dcc8 Minor Optimization on InnerShuffleList (#12690)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-03 06:56:07 +00:00
terencechain
dbc17cf2ca Fix: use correct context for UpdateCommitteeCache (#12691) 2023-08-03 06:00:58 +00:00
terencechain
d38762772a test: add execution payload operation tests (#12685)
* test: add execution payload operation tests

* thanks!

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* Update testing/spectest/shared/bellatrix/operations/execution_payload.go

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-02 08:16:46 +00:00
Potuz
e5d1eb885d Update caches blocking (#12679)
* update NSC together with epoch boundary caches

* block when updating caches

* reviews

* removal of very useful helper because the reviewers requested it :)

* use IsEpochEnd
2023-08-02 05:59:09 +00:00
terencechain
a9d7701081 test: add missing random and fork transition spec tests (#12681)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-01 14:08:40 +00:00
Bharath Vedartham
33abe6eb90 add metric to check if validator is in next sync committee (#12650)
* add metric to check if validator is in next sync committee

* Update validator/client/validator.go

* Update validator/client/metrics.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2023-08-01 08:25:10 +00:00
terencechain
c342c9a14e fix: update nil check for new validator (#12677) 2023-08-01 04:13:57 +00:00
Radosław Kapka
a9b003e1fe HTTP Beacon API: /eth/v1/validator/contribution_and_proofs (#12660)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-07-31 17:32:39 +00:00
Radosław Kapka
955175b7eb Update server-side events dependency (#12676)
* Update server-side events dependency

* go sum

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-31 15:51:19 +00:00
Potuz
c17682940e only handle epoch boundary on canonical blocks (#12666)
* only handle epoch boundary on canonical blocks

* do not fetch headstate
2023-07-31 12:07:09 -03:00
Nishant Das
db450f53a4 Fix Update Of Committee Cache (#12668)
* fix it

* sammy's comment

* fix tests

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-28 13:34:03 +00:00
terencechain
493905ee9e feat: add state regren duration metric (#12672) 2023-07-27 19:33:25 +00:00
james-prysm
e449724034 Bugfix: proposer-settings edge case for activating validators (#12671) 2023-07-27 16:45:16 +00:00
terencechain
a44c209be0 feat: add metric for block gossip time (#12670) 2023-07-27 15:17:24 +00:00
terencechain
183e72b194 fix: update epoch + 1 (#12667) 2023-07-27 18:53:36 +08:00
Potuz
337c254161 update shuffling caches at epoch boundary (#12661)
* update shuffling caches at epoch boundary

* move span

* do not advance to a past slot
2023-07-26 18:46:18 +00:00
james-prysm
ec60cab2bf Bugfix: Metrics adding fix pending validators balance (#12665) 2023-07-26 16:09:24 +00:00
Potuz
ded00495e7 Parallelize cl el validation (#12590)
* Parallelize EL and CL block validation

* fix by Nishant
2023-07-26 12:23:51 -03:00
Nishant Das
113172d8aa Exit Initial Sync Early (#12659)
* add check

* fix test
2023-07-26 00:38:25 +00:00
Radosław Kapka
2b40c44879 Use the correct root in consensus validation (#12657)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-07-25 12:04:08 +02:00
Radosław Kapka
fc193b09bf HTTP Beacon API: /eth/v1/validator/aggregate_attestation (#12643)
* initial implementation

* it works

* tests

* extracted helper functions

* fix code

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-24 22:16:45 +00:00
Radosław Kapka
a0d53f5155 Register GetValidatorPerformance as POST (#12658) 2023-07-24 21:17:08 +00:00
terencechain
ff3d2bc69f fix: add mainnet withdrawals and bls spec tests (#12655) 2023-07-24 14:13:02 +00:00
Nishant Das
dd403f830c clean up logging (#12653) 2023-07-24 20:29:50 +08:00
Raul Jordan
e9c8e84618 Integrate Read-Only-Lock-on-Get LRU Cache for Public Keys (#12646)
* use new lru cache

* update build

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-22 04:37:53 +00:00
james-prysm
9c250dd4c2 Builder gas limit fix default to 0 in some cases (#12647)
* adding fix for nethermind's findings on gaslimit =0 on some default setups

* adding in default gaslimit check

* fixing linting on complexity

* fixing cognitive complexity linting marker

* fixing unit test and bug with referencing

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-07-22 03:44:50 +00:00
Potuz
f97db3b738 Different parallel hashing (#12639)
* Parallelize hashing of large lists

* add unit test

* add file

* do not parallelize on low processor count

* revert minimal proc count

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-07-21 20:36:20 -04:00
543 changed files with 28568 additions and 16939 deletions

View File

@@ -1 +1 @@
6.2.1
6.3.2

.github/workflows/fuzz.yml (new file, 45 lines)
View File

@@ -0,0 +1,45 @@
name: "fuzz"
on:
workflow_dispatch:
schedule:
- cron: "0 12 * * *"
permissions:
contents: write
pull-requests: write
jobs:
list:
runs-on: ubuntu-latest
timeout-minutes: 180
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.20'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:
tags: fuzz,develop
outputs:
fuzz-tests: ${{steps.list.outputs.fuzz-tests}}
fuzz:
runs-on: ubuntu-latest
timeout-minutes: 360
needs: list
strategy:
fail-fast: false
matrix:
include: ${{fromJson(needs.list.outputs.fuzz-tests)}}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.20'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}
fuzz-regexp: ${{ matrix.func }}
fuzz-time: "20m"
tags: fuzz,develop
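
For context, this workflow lists Go native fuzz targets built with the fuzz,develop tags and runs each for 20 minutes. A minimal, self-contained sketch of such a target (a hypothetical example, not an actual Prysm fuzz test) would look like this:

//go:build fuzz

package fuzzexample

import (
	"bytes"
	"testing"
)

// FuzzReverse checks that reversing a byte slice twice returns the original
// input and never panics; the fuzzing engine grows the corpus from the seed below.
func FuzzReverse(f *testing.F) {
	f.Add([]byte("seed"))
	f.Fuzz(func(t *testing.T, in []byte) {
		if out := reverse(reverse(in)); !bytes.Equal(in, out) {
			t.Fatalf("double reverse mismatch: %x != %x", in, out)
		}
	})
}

func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[len(b)-1-i] = b[i]
	}
	return out
}

Locally, a target like this runs with something like go test -tags=fuzz,develop -fuzz=FuzzReverse in its package directory.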

View File

@@ -133,8 +133,8 @@ nogo(
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],
"//conditions:default": [
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
],
}),
)

View File

@@ -67,10 +67,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
sha256 = "5982e5463f171da99e3bdaeff8c0f48283a7a5f396ec5282910b9e8a49c0dd7e",
sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
],
)
@@ -94,10 +94,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
],
)
@@ -172,7 +172,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.20.6",
go_version = "1.20.7",
nogo = "@//:nogo",
)
@@ -213,7 +213,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.4.0-alpha.2"
consensus_spec_version = "v1.4.0-beta.1"
bls_test_version = "v0.1.1"
@@ -229,7 +229,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bfba887cbe043907adf884cf6d18f2e8a31e34e9245397b84af1f54ed22b706a",
sha256 = "24399b60ce3fbeb2311952d213dc3731b6dcb0f8881b016c283de5b518d2bbba",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -245,7 +245,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9ff77bef0ca1e39bcee2769075c89f0f91fb8f89ad38a1b3e0c31cf6732650ad",
sha256 = "8e656ee48d2e2ebc9cf9baedb81f27925bc625b3e3fbb2883444a08758a5884a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -261,7 +261,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "fbcc3c9898110c675e5de9c27cb667ad7cadf930db7ebb5c6bba15d7be95bf8a",
sha256 = "8bd137da6cc57a25383bfac5bc37e31265098145278bd8002b88e24c8b4718b9",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -276,7 +276,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "9fff1bcdd0e5857797197800db091c3675b2c11b54f704fe4de1ba683bed7ba5",
sha256 = "2bc1edb6e4a4f86c00317c04618a90b0ca29ee1eba833d0a64dd67fdd83fdbe3",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -313,10 +313,19 @@ filegroup(
)
http_archive(
name = "com_github_bazelbuild_buildtools",
sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
name = "holesky_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"custom_config_data/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "4116c8acb54eb3ca28cc4dc9bc688e08e25da91d70ed1f2622f02d3c33eba922",
strip_prefix = "holesky-76057d57ab1f585519ecb606a9e5f7780e925a37",
url = "https://github.com/eth-clients/holesky/archive/76057d57ab1f585519ecb606a9e5f7780e925a37.tar.gz", # Aug 27, 2023
)
http_archive(
@@ -332,22 +341,6 @@ http_archive(
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
# External dependencies
http_archive(
name = "prysm_web_ui",
build_file_content = """
filegroup(
name = "site",
srcs = glob(["**/*"]),
visibility = ["//visibility:public"],
)
""",
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
],
)
load("//:deps.bzl", "prysm_deps")
# gazelle:repository_macro deps.bzl%prysm_deps
@@ -379,10 +372,6 @@ load(
_cc_image_repos()
load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
go_embed_data_dependencies()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
gometalinter_dependencies()
@@ -391,10 +380,6 @@ load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies()
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"path"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -109,8 +110,8 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", sr).
WithField("block_root", br).
WithField("state_root", hexutil.Encode(sr[:])).
WithField("block_root", hexutil.Encode(br[:])).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
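
A standalone illustration (not Prysm code) of why the log fields above are now wrapped in hexutil.Encode: printing a raw [32]byte root yields Go's decimal byte-array form, while hexutil.Encode yields the conventional 0x-prefixed hex string.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	var root [32]byte
	root[0] = 0xab
	fmt.Println(root)                    // [171 0 0 ... 0]  (hard to read in logs)
	fmt.Println(hexutil.Encode(root[:])) // 0xab00...00      (what the log now prints)
}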

View File

@@ -17,6 +17,7 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
@@ -45,6 +46,7 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -844,8 +844,8 @@ func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedB
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DataGasUsed: 1,
ExcessDataGas: 1,
BlobGasUsed: 1,
ExcessBlobGas: 2,
},
},
},

View File

@@ -11,6 +11,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -104,14 +105,10 @@ type Uint256 struct {
*big.Int
}
func isValidUint256(bi *big.Int) bool {
return bi.Cmp(big.NewInt(0)) >= 0 && bi.BitLen() <= 256
}
func stringToUint256(s string) (Uint256, error) {
bi := new(big.Int)
_, ok := bi.SetString(s, 10)
if !ok || !isValidUint256(bi) {
if !ok || !math.IsValidUint256(bi) {
return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", s)
}
return Uint256{Int: bi}, nil
@@ -120,7 +117,7 @@ func stringToUint256(s string) (Uint256, error) {
// sszBytesToUint256 creates a Uint256 from a ssz-style (little-endian byte slice) representation.
func sszBytesToUint256(b []byte) (Uint256, error) {
bi := bytesutil.LittleEndianBytesToBigInt(b)
if !isValidUint256(bi) {
if !math.IsValidUint256(bi) {
return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", b)
}
return Uint256{Int: bi}, nil
@@ -128,7 +125,7 @@ func sszBytesToUint256(b []byte) (Uint256, error) {
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
func (s Uint256) SSZBytes() []byte {
if !isValidUint256(s.Int) {
if !math.IsValidUint256(s.Int) {
return []byte{}
}
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
@@ -155,7 +152,7 @@ func (s *Uint256) UnmarshalText(t []byte) error {
if !ok {
return errors.Wrapf(errDecodeUint256, "value=%s", t)
}
if !isValidUint256(z) {
if !math.IsValidUint256(z) {
return errors.Wrapf(errDecodeUint256, "value=%s", t)
}
s.Int = z
@@ -175,7 +172,7 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
// MarshalText returns a text byte representation of Uint256.
func (s Uint256) MarshalText() ([]byte, error) {
if !isValidUint256(s.Int) {
if !math.IsValidUint256(s.Int) {
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
}
return []byte(s.String()), nil
@@ -1202,8 +1199,8 @@ type ExecutionPayloadHeaderDeneb struct {
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
WithdrawalsRoot hexutil.Bytes `json:"withdrawals_root"`
DataGasUsed Uint64String `json:"data_gas_used"` // new in deneb
ExcessDataGas Uint64String `json:"excess_data_gas"` // new in deneb
BlobGasUsed Uint64String `json:"blob_gas_used"` // new in deneb
ExcessBlobGas Uint64String `json:"excess_blob_gas"` // new in deneb
*v1.ExecutionPayloadHeaderDeneb
}
@@ -1230,8 +1227,8 @@ func (h *ExecutionPayloadHeaderDeneb) MarshalJSON() ([]byte, error) {
BlockHash: h.ExecutionPayloadHeaderDeneb.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeaderDeneb.TransactionsRoot,
WithdrawalsRoot: h.ExecutionPayloadHeaderDeneb.WithdrawalsRoot,
DataGasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.DataGasUsed),
ExcessDataGas: Uint64String(h.ExecutionPayloadHeaderDeneb.ExcessDataGas),
BlobGasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.BlobGasUsed),
ExcessBlobGas: Uint64String(h.ExecutionPayloadHeaderDeneb.ExcessBlobGas),
})
}
@@ -1267,8 +1264,8 @@ func (h *ExecutionPayloadHeaderDeneb) ToProto() (*v1.ExecutionPayloadHeaderDeneb
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
WithdrawalsRoot: bytesutil.SafeCopyBytes(h.WithdrawalsRoot),
DataGasUsed: uint64(h.DataGasUsed),
ExcessDataGas: uint64(h.ExcessDataGas),
BlobGasUsed: uint64(h.BlobGasUsed),
ExcessBlobGas: uint64(h.ExcessBlobGas),
}, nil
}
@@ -1301,8 +1298,8 @@ type ExecutionPayloadDeneb struct {
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
Withdrawals []Withdrawal `json:"withdrawals"`
DataGasUsed Uint64String `json:"data_gas_used"` // new in deneb
ExcessDataGas Uint64String `json:"excess_data_gas"` // new in deneb
BlobGasUsed Uint64String `json:"blob_gas_used"` // new in deneb
ExcessBlobGas Uint64String `json:"excess_blob_gas"` // new in deneb
}
// BlobsBundle is a field in ExecutionPayloadDenebAndBlobsBundle.
@@ -1392,8 +1389,8 @@ func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
Transactions: txs,
Withdrawals: withdrawals,
DataGasUsed: uint64(p.DataGasUsed),
ExcessDataGas: uint64(p.ExcessDataGas),
BlobGasUsed: uint64(p.BlobGasUsed),
ExcessBlobGas: uint64(p.ExcessBlobGas),
}, nil
}
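
A sketch of the shared helper this file now calls, assuming the new math.IsValidUint256 keeps exactly the check performed by the removed local isValidUint256: the value must be non-negative and fit in 256 bits.

package math

import "math/big"

// IsValidUint256 reports whether bi can be represented as an unsigned 256-bit
// integer, i.e. it is non-negative and at most 256 bits wide.
func IsValidUint256(bi *big.Int) bool {
	return bi.Cmp(big.NewInt(0)) >= 0 && bi.BitLen() <= 256
}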

View File

@@ -15,6 +15,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/math"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -137,8 +138,8 @@ var testExampleHeaderResponseDeneb = `{
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"data_gas_used": "1",
"excess_data_gas": "1"
"blob_gas_used": "1",
"excess_blob_gas": "2"
},
"blinded_blobs_bundle": {
"commitments": [
@@ -588,8 +589,8 @@ var testExampleExecutionPayloadDeneb = fmt.Sprintf(`{
"amount": "1"
}
],
"data_gas_used": "2",
"excess_data_gas": "3"
"blob_gas_used": "2",
"excess_blob_gas": "3"
},
"blobs_bundle": {
"commitments": [
@@ -851,13 +852,13 @@ func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
},
{
expected: "2",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.DataGasUsed),
name: "ExecPayloadResponse.ExecutionPayload.DataGasUsed",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlobGasUsed),
name: "ExecPayloadResponse.ExecutionPayload.BlobGasUsed",
},
{
expected: "3",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.ExcessDataGas),
name: "ExecPayloadResponse.ExecutionPayload.ExcessDataGas",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.ExcessBlobGas),
name: "ExecPayloadResponse.ExecutionPayload.ExcessBlobGas",
},
}
for _, c := range cases {
@@ -873,8 +874,8 @@ func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
assert.Equal(t, uint64(1), w.Amount.Uint64())
assert.Equal(t, uint64(2), uint64(epr.Data.ExecutionPayload.DataGasUsed))
assert.Equal(t, uint64(3), uint64(epr.Data.ExecutionPayload.ExcessDataGas))
assert.Equal(t, uint64(2), uint64(epr.Data.ExecutionPayload.BlobGasUsed))
assert.Equal(t, uint64(3), uint64(epr.Data.ExecutionPayload.ExcessBlobGas))
}
func TestExecutionPayloadResponseToProto(t *testing.T) {
@@ -1038,8 +1039,8 @@ func TestExecutionPayloadResponseDenebToProto(t *testing.T) {
Amount: 1,
},
},
DataGasUsed: 2,
ExcessDataGas: 3,
BlobGasUsed: 2,
ExcessBlobGas: 3,
}
require.DeepEqual(t, expected, p)
commitment, err := hexutil.Decode("0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
@@ -1312,8 +1313,8 @@ func pbExecutionPayloadHeaderDeneb(t *testing.T) *v1.ExecutionPayloadHeaderDeneb
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DataGasUsed: 1,
ExcessDataGas: 1,
BlobGasUsed: 1,
ExcessBlobGas: 2,
}
}
@@ -1343,7 +1344,7 @@ func TestExecutionPayloadHeaderDeneb_MarshalJSON(t *testing.T) {
}
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","data_gas_used":"1","excess_data_gas":"1"}`
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","blob_gas_used":"1","excess_blob_gas":"2"}`
require.Equal(t, expected, string(b))
}
@@ -1405,40 +1406,40 @@ func TestIsValidUint256(t *testing.T) {
_, ok = value.SetString("-10000000000000000000000000000000000000000000000000000000000000000", 16)
require.Equal(t, true, ok)
require.Equal(t, 257, value.BitLen())
require.Equal(t, false, isValidUint256(value))
require.Equal(t, false, math.IsValidUint256(value))
// negative uint256.max
_, ok = value.SetString("-ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16)
require.Equal(t, true, ok)
require.Equal(t, 256, value.BitLen())
require.Equal(t, false, isValidUint256(value))
require.Equal(t, false, math.IsValidUint256(value))
// negative number
_, ok = value.SetString("-1", 16)
require.Equal(t, true, ok)
require.Equal(t, false, isValidUint256(value))
require.Equal(t, false, math.IsValidUint256(value))
// uint256.min
_, ok = value.SetString("0", 16)
require.Equal(t, true, ok)
require.Equal(t, true, isValidUint256(value))
require.Equal(t, true, math.IsValidUint256(value))
// positive number
_, ok = value.SetString("1", 16)
require.Equal(t, true, ok)
require.Equal(t, true, isValidUint256(value))
require.Equal(t, true, math.IsValidUint256(value))
// uint256.max
_, ok = value.SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16)
require.Equal(t, true, ok)
require.Equal(t, 256, value.BitLen())
require.Equal(t, true, isValidUint256(value))
require.Equal(t, true, math.IsValidUint256(value))
// uint256.max + 1
_, ok = value.SetString("10000000000000000000000000000000000000000000000000000000000000000", 16)
require.Equal(t, true, ok)
require.Equal(t, 257, value.BitLen())
require.Equal(t, false, isValidUint256(value))
require.Equal(t, false, math.IsValidUint256(value))
}
func TestUint256Unmarshal(t *testing.T) {

View File

@@ -1,7 +1,9 @@
package api
const (
VersionHeader = "Eth-Consensus-Version"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
ExecutionPayloadValueHeader = "Eth-Execution-Payload-Value"
JsonMediaType = "application/json"
OctetStreamMediaType = "application/octet-stream"
)
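
A hedged usage sketch of the two new headers (hypothetical handler, not Prysm's actual code); the header names are the constants added above, inlined as string literals so the snippet stands alone.

package apiexample

import (
	"net/http"
	"strconv"
)

const (
	executionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
	executionPayloadValueHeader   = "Eth-Execution-Payload-Value"
)

// writeBlockHeaders shows how a block-production response could advertise
// whether the returned payload is blinded and what its value is.
func writeBlockHeaders(w http.ResponseWriter, blinded bool, weiValue string) {
	w.Header().Set(executionPayloadBlindedHeader, strconv.FormatBool(blinded))
	w.Header().Set(executionPayloadValueHeader, weiValue)
}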

View File

@@ -21,6 +21,7 @@ go_library(
"process_block.go",
"process_block_helpers.go",
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"service.go",
"weak_subjectivity_checks.go",
@@ -35,6 +36,7 @@ go_library(
deps = [
"//async:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -88,6 +90,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -17,8 +17,6 @@ var (
errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
errBlockDoesNotExist = errors.New("could not find block in DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.

View File

@@ -5,6 +5,7 @@ import (
"crypto/sha256"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
@@ -27,6 +28,8 @@ import (
"go.opencensus.io/trace"
)
const blobCommitmentVersionKZG uint8 = 0x01
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
@@ -214,18 +217,15 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
var lastValidHash []byte
if blk.Version() >= version.Deneb {
var kzgs [][]byte
kzgs, err = blk.Block().Body().BlobKzgCommitments()
var versionedHashes []common.Hash
versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
}
versionedHashes := make([][32]byte, len(kzgs))
for i := range versionedHashes {
versionedHashes[i] = kzgToVersionedHash(kzgs[i])
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes)
pr := common.Hash(blk.Block().ParentRoot())
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
} else {
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{} /*empty version hashes before Deneb*/)
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
}
switch err {
case nil:
@@ -326,7 +326,24 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella, version.Deneb:
case version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return false, emptyAttri, 0
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
Withdrawals: withdrawals,
ParentBeaconBlockRoot: headRoot,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
}
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
@@ -360,30 +377,36 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return true, attr, proposerID
}
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
for _, root := range blkRoots {
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
return err
}
// Delete block also deletes the state as well.
if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
// TODO(10487): If a caller requests to delete a root that's justified and finalized. We should gracefully shutdown.
// This is an irreparable condition, it would me a justified or finalized block has become invalid.
return err
}
// No op if the sidecar does not exist.
if err := s.cfg.BeaconDB.DeleteBlobSidecar(ctx, root); err != nil {
return err
}
}
return nil
}
const (
blobCommitmentVersionKZG uint8 = 0x01
)
func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([]common.Hash, error) {
commitments, err := body.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
}
// kzgToVersionedHash implements kzg_to_versioned_hash from EIP-4844
func kzgToVersionedHash(kzg []byte) (h [32]byte) {
h = sha256.Sum256(kzg)
h[0] = blobCommitmentVersionKZG
return
versionedHashes := make([]common.Hash, len(commitments))
for i, commitment := range commitments {
versionedHashes[i] = sha256.Sum256(commitment)
versionedHashes[i][0] = blobCommitmentVersionKZG
}
return versionedHashes, nil
}
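
As a standalone illustration of the rule the new helper applies (EIP-4844's kzg_to_versioned_hash): each versioned hash is the SHA-256 of the commitment with the first byte overwritten by the 0x01 KZG version prefix.

package main

import (
	"crypto/sha256"
	"fmt"
)

const blobCommitmentVersionKZG uint8 = 0x01

// kzgToVersionedHash mirrors the per-commitment logic shown above.
func kzgToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = blobCommitmentVersionKZG
	return h
}

func main() {
	commitment := make([]byte, 48) // placeholder bytes standing in for a KZG commitment
	fmt.Printf("0x%x\n", kzgToVersionedHash(commitment))
}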

View File

@@ -894,6 +894,12 @@ func Test_GetPayloadAttributeDeneb(t *testing.T) {
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
attrV3, err := attr.PbV3()
require.NoError(t, err)
hr := service.headRoot()
require.Equal(t, hr, [32]byte(attrV3.ParentBeaconBlockRoot))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
@@ -1081,3 +1087,28 @@ func TestService_getPayloadHash(t *testing.T) {
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'a'}, h)
}
func TestKZGCommitmentToVersionedHashes(t *testing.T) {
kzg1 := make([]byte, 96)
kzg1[10] = 'a'
kzg2 := make([]byte, 96)
kzg2[1] = 'b'
commitments := [][]byte{kzg1, kzg2}
blk := &ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
Body: &ethpb.BeaconBlockBodyDeneb{
BlobKzgCommitments: commitments,
},
},
}
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
vhs, err := kzgCommitmentsToVersionedHashes(b.Block().Body())
require.NoError(t, err)
vh0 := "0x01cf2315c97658a7ed54ada181765e23b3fadb5150fab39509f631c0b9af4566"
vh1 := "0x01e27ce28e527eb07196b31af0f5fa1882ace701a682022ab779f816ac39d47e"
require.Equal(t, 2, len(vhs))
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}

View File

@@ -182,7 +182,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of the initial-sync-cache-state flag, it uses the finalized
// checkpoint as an anchor to resume sync, so the head no longer needs to be saved on a per-slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState, optimistic bool) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}
@@ -198,7 +198,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
if err != nil {
return err
}
if err := s.setHeadInitialSync(r, bCp, hs); err != nil {
if err := s.setHeadInitialSync(r, bCp, hs, optimistic); err != nil {
return errors.Wrap(err, "could not set head")
}
return nil
@@ -227,7 +227,7 @@ func (s *Service) setHead(newHead *head) error {
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState, optimistic bool) error {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -237,9 +237,10 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySig
return err
}
s.head = &head{
root: root,
block: bCp,
state: state,
root: root,
block: bCp,
state: state,
optimistic: optimistic,
}
return nil
}

View File

@@ -0,0 +1,31 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"trusted_setup.go",
"validation.go",
],
embedsrcs = ["trusted_setup.json"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg",
visibility = ["//visibility:public"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"trusted_setup_test.go",
"validation_test.go",
],
embed = [":go_default_library"],
deps = [
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
],
)

View File

@@ -0,0 +1,28 @@
package kzg
import (
_ "embed"
"encoding/json"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/pkg/errors"
)
var (
//go:embed trusted_setup.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context
)
func Start() error {
parsedSetup := GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
if err != nil {
return errors.Wrap(err, "could not parse trusted setup JSON")
}
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
if err != nil {
return errors.Wrap(err, "could not initialize go-kzg context")
}
return nil
}
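A rough standalone sketch of the go:embed pattern used here, assuming a small setup.json file next to the source; the file name and struct fields are illustrative stand-ins, not Prysm's trusted setup format:

package main

import (
	_ "embed"
	"encoding/json"
	"fmt"
	"log"
)

//go:embed setup.json
var embeddedSetup []byte // compiled into the binary, like trusted_setup.json above

type setup struct {
	G1Points []string `json:"g1_points"`
}

func main() {
	var s setup
	if err := json.Unmarshal(embeddedSetup, &s); err != nil {
		log.Fatalf("could not parse embedded setup: %v", err)
	}
	fmt.Println("loaded", len(s.G1Points), "G1 points")
}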

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,12 @@
package kzg
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestStart(t *testing.T) {
require.NoError(t, Start())
require.NotNil(t, kzgContext)
}

View File

@@ -0,0 +1,47 @@
package kzg
import (
"fmt"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// IsDataAvailable checks that
// - all blobs in the block are available
// - the number of KZG commitments matches the number of blobs in the block
// - the number of proofs matches the number of blobs
// - the proofs verify against the KZG commitments
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error {
if len(commitments) != len(sidecars) {
return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d",
len(commitments), len(sidecars))
}
if len(commitments) == 0 {
return nil
}
blobs := make([]GoKZG.Blob, len(commitments))
proofs := make([]GoKZG.KZGProof, len(commitments))
cmts := make([]GoKZG.KZGCommitment, len(commitments))
for i, sidecar := range sidecars {
blobs[i] = bytesToBlob(sidecar.Blob)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
cmts[i] = bytesToCommitment(commitments[i])
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
copy(ret[:], blob)
return
}
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
copy(ret[:], commitment)
return
}
func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
copy(ret[:], proof)
return
}
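A small standalone sketch of how these fixed-size conversion helpers behave: copy zero-pads short inputs and silently drops bytes past the array length, so lengths are expected to be validated upstream (the 48-byte size below is illustrative):

package main

import "fmt"

func bytesTo48(b []byte) (out [48]byte) {
	copy(out[:], b) // zero-pads short inputs, truncates long ones
	return
}

func main() {
	short := bytesTo48([]byte{0x01, 0x02})
	long := bytesTo48(make([]byte, 64))
	fmt.Println(short[:3]) // [1 2 0]
	fmt.Println(len(long)) // 48
}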

View File

@@ -0,0 +1,25 @@
package kzg
import (
"testing"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestIsDataAvailable(t *testing.T) {
sidecars := make([]*ethpb.BlobSidecar, 0)
commitments := make([][]byte, 0)
require.NoError(t, IsDataAvailable(commitments, sidecars))
}
func TestBytesToAny(t *testing.T) {
bytes := []byte{0x01, 0x02}
blob := GoKZG.Blob{0x01, 0x02}
commitment := GoKZG.KZGCommitment{0x01, 0x02}
proof := GoKZG.KZGProof{0x01, 0x02}
require.DeepEqual(t, blob, bytesToBlob(bytes))
require.DeepEqual(t, commitment, bytesToCommitment(bytes))
require.DeepEqual(t, proof, bytesToKZGProof(bytes))
}

View File

@@ -61,6 +61,14 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
kzgs, err := b.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
} else if len(kzgs) > 0 {
log = log.WithField("blobCommitmentCount", len(kzgs))
}
}
log.Info("Finished applying state transition")
return nil
}
@@ -73,7 +81,7 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
log.WithFields(logrus.Fields{
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
@@ -87,7 +95,8 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
"deposits": len(block.Body().Deposits()),
}).Debug("Synced new block")
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
@@ -15,6 +16,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -86,18 +88,6 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -109,7 +99,7 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
if err != nil {
log.WithError(err).Error("Could not check if block is optimistic")
log.WithError(err).Debug("Could not check if block is optimistic")
optimistic = true
}
@@ -126,8 +116,29 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
})
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
return err
if headRoot == blockRoot {
// Updating the next slot state cache can happen in the background,
// except at the epoch boundary, in which case we lock to handle
// the shuffling and proposer cache updates.
// We handle these caches only on canonical
// blocks; otherwise this is handled by lateBlockTasks.
slot := postState.Slot()
if slots.IsEpochEnd(slot) {
if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
return errors.Wrap(err, "could not handle epoch boundary")
}
} else {
go func() {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Error("could not update next slot state cache")
}
}()
}
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
@@ -151,19 +162,14 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock,
blockRoots [][32]byte) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
if len(blks) == 0 || len(blockRoots) == 0 {
if len(blks) == 0 {
return errors.New("no blocks provided")
}
if len(blks) != len(blockRoots) {
return errWrongBlockCount
}
if err := consensusblocks.BeaconBlockIsNil(blks[0]); err != nil {
return invalidBlock{error: err}
}
@@ -213,7 +219,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
boundaries[b.Root()] = preState.Copy()
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()
@@ -246,11 +252,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
var isValidPayload bool
for i, b := range blks {
root := b.Root()
isValidPayload, err = s.notifyNewPayload(ctx,
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
return s.handleInvalidExecutionError(ctx, err, blockRoots[i], b.Block().ParentRoot())
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -262,13 +269,13 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
if err := s.saveInitSyncBlock(ctx, blockRoots[i], b); err != nil {
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
tracing.AnnotateError(span, err)
return err
}
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: blockRoots[i][:],
Root: root[:],
}); err != nil {
tracing.AnnotateError(span, err)
return err
@@ -292,8 +299,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
return err
}
}
lastB := blks[len(blks)-1]
lastBR := lastB.Root()
// Also saves the last post state, which is used as the pre state for the next batch.
lastBR := blockRoots[len(blks)-1]
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return err
}
@@ -311,7 +319,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
lastB := blks[len(blks)-1]
arg := &notifyForkchoiceUpdateArg{
headState: preState,
headRoot: lastBR,
@@ -320,63 +327,51 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState, blockRoot []byte) error {
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update committee cache")
}
if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update proposer index cache")
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Could not update committee cache")
}
if err := helpers.UpdateProposerIndicesInCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
}
}()
return nil
}
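The pattern above (synchronous cache updates for the current epoch, plus background pre-warming for the next epoch under its own deadline, detached from the caller's context) can be sketched in isolation roughly as follows; the durations and print statements are placeholders:

package main

import (
	"context"
	"fmt"
	"time"
)

func updateCaches(ctx context.Context, epoch int) {
	_ = ctx // the caller's context governs only the synchronous part
	fmt.Println("updated caches for epoch", epoch)
	go func() {
		// Detach from the parent context so cancellation of the request
		// does not abort the pre-warm; bound it with its own deadline.
		bgCtx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
		defer cancel()
		select {
		case <-time.After(50 * time.Millisecond): // stand-in for the next-epoch cache update
			fmt.Println("pre-warmed caches for epoch", epoch+1)
		case <-bgCtx.Done():
			fmt.Println("skipped pre-warm:", bgCtx.Err())
		}
	}()
}

func main() {
	updateCaches(context.Background(), 7)
	time.Sleep(300 * time.Millisecond) // let the background goroutine finish
}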
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, copied.Slot()+1)
if err != nil {
return err
}
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
e := coreTime.CurrentEpoch(copied)
if err := helpers.UpdateProposerIndicesInCache(ctx, copied, e); err != nil {
return err
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateProposerIndicesInCache(slotCtx, copied, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
}
}()
} else if postState.Slot() >= s.nextEpochBoundarySlot {
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
return err
}
// Update caches at epoch boundary slot.
// The following updates have shortcut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
headSt, err := s.HeadState(ctx)
if err != nil {
return err
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
return err
}
// return early if we are advancing to a past epoch
if slot < headState.Slot() {
return nil
}
return nil
if !slots.IsEpochEnd(slot) {
return nil
}
copied := headState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, slot+1)
if err != nil {
return err
}
return s.updateEpochBoundaryCaches(ctx, copied)
}
// This feeds the attestations included in the block into the fork choice store. It allows the fork choice store
@@ -504,11 +499,84 @@ func (s *Service) runLateBlockTasks() {
}
}
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}
block := signed.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if slots.ToEpoch(block.Slot())+params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest > primitives.Epoch(s.CurrentSlot()) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
existingBlobs := len(kzgCommitments)
if existingBlobs == 0 {
return nil
}
// Read first from db in case we have the blobs
s.blobNotifier.Lock()
var nc *blobNotifierChan
var ok bool
nc, ok = s.blobNotifier.chanForRoot[root]
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
if err == nil {
if len(sidecars) >= existingBlobs {
delete(s.blobNotifier.chanForRoot, root)
s.blobNotifier.Unlock()
return kzg.IsDataAvailable(kzgCommitments, sidecars)
}
}
// Create the channel if it doesn't exist already; the index map will be
// created later anyway.
if !ok {
nc = &blobNotifierChan{channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
s.blobNotifier.chanForRoot[root] = nc
}
// We have more commitments in the block than blobs in the database,
// so we sync the channel indices with the stored sidecars.
nc.indices = make(map[uint64]struct{})
for _, sidecar := range sidecars {
nc.indices[sidecar.Index] = struct{}{}
}
s.blobNotifier.Unlock()
channelWrites := len(sidecars)
for {
select {
case <-nc.channel:
channelWrites++
if channelWrites == existingBlobs {
s.blobNotifier.Lock()
delete(s.blobNotifier.chanForRoot, root)
s.blobNotifier.Unlock()
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
if err != nil {
return errors.Wrap(err, "could not get blob sidecars")
}
return kzg.IsDataAvailable(kzgCommitments, sidecars)
}
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
}
}
}
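A simplified, self-contained model of this wait loop (the real code counts channel writes and deduplicates indices on the producer side; here the consumer deduplicates, and the types are plain stand-ins for the blob notifier):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForBlobs blocks until `expected` distinct blob indices have been
// announced on ch, or the context expires.
func waitForBlobs(ctx context.Context, ch <-chan uint64, expected int) error {
	seen := make(map[uint64]struct{})
	for len(seen) < expected {
		select {
		case idx := <-ch:
			seen[idx] = struct{}{}
		case <-ctx.Done():
			return fmt.Errorf("waiting for blob sidecars: %w", ctx.Err())
		}
	}
	return nil
}

func main() {
	ch := make(chan uint64, 6)
	go func() {
		for i := uint64(0); i < 3; i++ {
			ch <- i // one notification per persisted sidecar index
		}
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForBlobs(ctx, ch, 3)) // <nil>
}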
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
// it also updates the next slot cache to deal with skipped slots.
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if s.CurrentSlot() == s.HeadSlot() {
return
}
@@ -516,8 +584,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
Type: statefeed.MissedSlot,
})
s.headLock.RLock()
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
@@ -528,7 +598,9 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.

View File

@@ -49,8 +49,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.ReadOnlySignedBeaconBlock
var blkRoots [][32]byte
var blks []consensusblocks.ROBlock
for i := 0; i < 97; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
@@ -63,16 +62,15 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
blks = append(blks, rwsb)
}
err := service.onBlockBatch(ctx, blks, blkRoots[1:])
require.ErrorIs(t, errWrongBlockCount, err)
err = service.onBlockBatch(ctx, blks, blkRoots)
err := service.onBlockBatch(ctx, blks)
require.NoError(t, err)
jcp := service.CurrentJustifiedCheckpt()
jroot := bytesutil.ToBytes32(jcp.Root)
require.Equal(t, blkRoots[63], jroot)
require.Equal(t, blks[63].Root(), jroot)
require.Equal(t, primitives.Epoch(2), service.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch)
}
@@ -84,8 +82,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
var blks []interfaces.ReadOnlySignedBeaconBlock
var blkRoots [][32]byte
var blks []consensusblocks.ROBlock
blkCount := 4
for i := 0; i <= blkCount; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
@@ -94,13 +91,12 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
bState, err = transition.ExecuteStateTransition(ctx, bState, wsb)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, root, wsb))
blks = append(blks, wsb)
blkRoots = append(blkRoots, root)
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
blks = append(blks, rwsb)
}
require.NoError(t, service.onBlockBatch(ctx, blks, blkRoots))
require.NoError(t, service.onBlockBatch(ctx, blks))
}
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
@@ -539,8 +535,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s, []byte{}))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
require.NoError(t, service.handleEpochBoundary(ctx, s.Slot(), s, []byte{}))
}
func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
@@ -1933,8 +1928,10 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []interfaces.ReadOnlySignedBeaconBlock{wsb}, [][32]byte{root}))
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)

View File

@@ -62,7 +62,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
return err
}
if !bytes.Equal(a.Data.Target.Root, r) {
return errors.New("FFG and LMD votes are not consistent")
return fmt.Errorf("FFG and LMD votes are not consistent, block root: %#x, target root: %#x, canonical target root: %#x", a.Data.BeaconBlockRoot, a.Data.Target.Root, r)
}
return nil
}
@@ -86,23 +86,30 @@ func (s *Service) spawnProcessAttestationsRoutine() {
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
return
case <-pat.C():
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
case <-st.C():
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, s.CurrentSlot())
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
}
}()
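The two-interval slot ticker used above can be modeled standalone as below, firing once at slot start and once at the late-block cutoff before the next slot; the 500ms slot and 100ms offset are demo values, not mainnet parameters:

package main

import (
	"fmt"
	"time"
)

// runSlots fires onSlotStart at each slot boundary and onLateCutoff at
// slotDuration-lateOffset into the slot, for a fixed number of demo slots.
func runSlots(slotDuration, lateOffset time.Duration, slots int, onSlotStart, onLateCutoff func(int)) {
	start := time.Now()
	intervals := []time.Duration{0, slotDuration - lateOffset}
	for slot := 0; slot < slots; slot++ {
		for i, off := range intervals {
			time.Sleep(time.Until(start.Add(time.Duration(slot)*slotDuration + off)))
			if i == 0 {
				onSlotStart(slot)
			} else {
				onLateCutoff(slot)
			}
		}
	}
}

func main() {
	runSlots(500*time.Millisecond, 100*time.Millisecond, 2,
		func(s int) { fmt.Println("slot start:", s) },       // e.g. forkchoice NewSlot
		func(s int) { fmt.Println("late-block cutoff:", s) }, // e.g. UpdateHead for the next slot
	)
}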

View File

@@ -0,0 +1,22 @@
package blockchain
import fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
// for the block root `root` is ready in the database.
func (s *Service) SendNewBlobEvent(root [32]byte, index uint64) {
s.blobNotifier.Lock()
nc, ok := s.blobNotifier.chanForRoot[root]
if !ok {
nc = &blobNotifierChan{indices: make(map[uint64]struct{}), channel: make(chan struct{}, fieldparams.MaxBlobsPerBlock)}
s.blobNotifier.chanForRoot[root] = nc
}
_, ok = nc.indices[index]
if ok {
s.blobNotifier.Unlock()
return
}
nc.indices[index] = struct{}{}
s.blobNotifier.Unlock()
nc.channel <- struct{}{}
}

View File

@@ -8,9 +8,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -22,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
)
// This defines how many epochs since finality must pass before the runtime begins saving hot states to the DB.
@@ -30,11 +33,17 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
}
// BlobReceiver interface defines the methods of chain service for receiving new
// blobs
type BlobReceiver interface {
SendNewBlobEvent([32]byte, uint64)
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashings *ethpb.AttesterSlashing)
@@ -61,19 +70,34 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
currentEpoch := coreTime.CurrentEpoch(preState)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
postState, err := s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
isValidPayload, err := s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
return errors.Wrap(err, "could not validate blob data availability")
}
// The rest of block processing takes a lock on forkchoice.
s.cfg.ForkChoiceStore.Lock()
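The concurrent validation introduced above follows the standard errgroup pattern; stripped of the beacon-chain types it looks roughly like this, with the two closures as stand-ins for the consensus state transition and the engine newPayload call:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	var stateValid, payloadValid bool
	eg.Go(func() error {
		// stand-in for the consensus state transition
		stateValid = true
		return ctx.Err()
	})
	eg.Go(func() error {
		// stand-in for notifying the execution engine of the new payload
		payloadValid = true
		return ctx.Err()
	})
	// Wait returns the first non-nil error and cancels the shared context.
	if err := eg.Wait(); err != nil {
		fmt.Println("block rejected:", err)
		return
	}
	fmt.Println("state valid:", stateValid, "payload valid:", payloadValid)
}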
@@ -81,13 +105,20 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
}
if coreTime.CurrentEpoch(postState) > currentEpoch {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
@@ -150,7 +181,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
@@ -158,28 +189,31 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []interfaces.Rea
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks, blkRoots); err != nil {
if err := s.onBlockBatch(ctx, blocks); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err
}
for i, b := range blocks {
lastBR := blocks[len(blocks)-1].Root()
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(lastBR)
if err != nil {
lastSlot := blocks[len(blocks)-1].Block().Slot()
log.WithError(err).Errorf("Could not check if block is optimistic, Root: %#x, Slot: %d", lastBR, lastSlot)
optimistic = true
}
for _, b := range blocks {
blockCopy, err := b.Copy()
if err != nil {
return err
}
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blkRoots[i])
if err != nil {
log.WithError(err).Error("Could not check if block is optimistic")
optimistic = true
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block().Slot(),
BlockRoot: blkRoots[i],
BlockRoot: b.Root(),
SignedBlock: blockCopy,
Verified: true,
Optimistic: optimistic,

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -228,13 +227,11 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
s, _ := minimalTestService(t, WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}))
err := s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
blks := []interfaces.ReadOnlySignedBeaconBlock{wsb}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb})
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {

View File

@@ -12,6 +12,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async/event"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
@@ -35,7 +36,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
@@ -46,22 +46,22 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifier *blobNotifier
}
// config options for the service.
@@ -90,16 +90,37 @@ type config struct {
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
type blobNotifierChan struct {
indices map[uint64]struct{}
channel chan struct{}
}
type blobNotifier struct {
sync.RWMutex
chanForRoot map[[32]byte]*blobNotifierChan
}
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, opts ...Option) (*Service, error) {
var err error
if params.DenebEnabled() {
err = kzg.Start()
if err != nil {
return nil, errors.Wrap(err, "could not initialize go-kzg context")
}
}
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifier{
chanForRoot: make(map[[32]byte]*blobNotifierChan),
}
srv := &Service{
ctx: ctx,
cancel: cancel,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
initSyncBlocks: make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock),
blobNotifier: bn,
cfg: &config{ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache()},
}
for _, opt := range opts {
@@ -110,7 +131,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
if srv.clockSetter == nil {
return nil, ErrMissingClockSetter
}
var err error
srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB)
if err != nil {
return nil, err

View File

@@ -357,7 +357,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState, false))
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)

View File

@@ -23,6 +23,7 @@ go_library(
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -22,6 +22,7 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -204,7 +205,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock, _ [][32]byte) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock) error {
if s.State == nil {
return ErrNilState
}
@@ -603,3 +604,6 @@ func (s *ChainService) FinalizedBlockHash() [32]byte {
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}
// SendNewBlobEvent mocks the same method in the chain service
func (s *ChainService) SendNewBlobEvent(_ [32]byte, _ uint64) {}

View File

@@ -289,13 +289,13 @@ func AttestationParticipationFlagIndices(beaconState state.BeaconState, data *et
sourceFlagIndex := cfg.TimelySourceFlagIndex
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
slotsPerEpoch := cfg.SlotsPerEpoch
sqtRootSlots := cfg.SqrRootSlotsPerEpoch
if matchedSrc && delay <= sqtRootSlots {
participatedFlags[sourceFlagIndex] = true
}
matchedSrcTgt := matchedSrc && matchedTgt
if matchedSrcTgt && delay <= slotsPerEpoch {
// Before Deneb, no attestation should pass validation without having delay <= slotsPerEpoch.
if matchedSrcTgt {
participatedFlags[targetFlagIndex] = true
}
matchedSrcTgtHead := matchedHead && matchedSrcTgt
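A compact standalone model of the participation rule after this change: the source flag keeps the sqrt(SLOTS_PER_EPOCH) bound while the target flag loses its inclusion-delay cap (EIP-7045); the constants below are demo values, not read from config:

package main

import (
	"fmt"
	"math"
)

const slotsPerEpoch = 32

func timelyFlags(matchedSrc, matchedTgt bool, delay uint64) (source, target bool) {
	sqrtSlots := uint64(math.Sqrt(slotsPerEpoch))
	source = matchedSrc && delay <= sqrtSlots
	target = matchedSrc && matchedTgt // no delay bound from Deneb onward
	return
}

func main() {
	src, tgt := timelyFlags(true, true, slotsPerEpoch+1)
	fmt.Println(src, tgt) // false true: the target flag still counts despite the long delay
}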

View File

@@ -630,6 +630,9 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
targetFlagIndex := cfg.TimelyTargetFlagIndex
headFlagIndex := cfg.TimelyHeadFlagIndex
denebState, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, denebState.SetSlot(1))
tests := []struct {
name string
inputState state.BeaconState
@@ -678,6 +681,34 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
targetFlagIndex: true,
},
},
{
name: "participated source and target with delay",
inputState: func() state.BeaconState {
return beaconState
}(),
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
},
inputDelay: params.BeaconConfig().SlotsPerEpoch + 1,
participationIndices: map[uint8]bool{
targetFlagIndex: true,
},
},
{
name: "participated source and target with delay in deneb",
inputState: func() state.BeaconState {
return denebState
}(),
inputData: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
Target: &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]},
},
inputDelay: params.BeaconConfig().SlotsPerEpoch + 1,
participationIndices: map[uint8]bool{
targetFlagIndex: true,
},
},
{
name: "participated source and target and head",
inputState: func() state.BeaconState {
@@ -696,7 +727,6 @@ func TestAttestationParticipationFlagIndices(t *testing.T) {
},
},
}
for _, test := range tests {
flagIndices, err := altair.AttestationParticipationFlagIndices(test.inputState, test.inputData, test.inputDelay)
require.NoError(t, err)

View File

@@ -16,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"go.opencensus.io/trace"
)
@@ -81,7 +82,6 @@ func VerifyAttestationNoVerifySignature(
s := att.Data.Slot
minInclusionCheck := s+params.BeaconConfig().MinAttestationInclusionDelay <= beaconState.Slot()
epochInclusionCheck := beaconState.Slot() <= s+params.BeaconConfig().SlotsPerEpoch
if !minInclusionCheck {
return fmt.Errorf(
"attestation slot %d + inclusion delay %d > state slot %d",
@@ -90,13 +90,17 @@ func VerifyAttestationNoVerifySignature(
beaconState.Slot(),
)
}
if !epochInclusionCheck {
return fmt.Errorf(
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
beaconState.Slot(),
s,
params.BeaconConfig().SlotsPerEpoch,
)
if beaconState.Version() < version.Deneb {
epochInclusionCheck := beaconState.Slot() <= s+params.BeaconConfig().SlotsPerEpoch
if !epochInclusionCheck {
return fmt.Errorf(
"state slot %d > attestation slot %d + SLOTS_PER_EPOCH %d",
beaconState.Slot(),
s,
params.BeaconConfig().SlotsPerEpoch,
)
}
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, beaconState, att.Data.Target.Epoch)
if err != nil {

View File

@@ -127,6 +127,44 @@ func TestProcessAttestationsNoVerify_OK(t *testing.T) {
assert.NoError(t, err)
}
func TestProcessAttestationsNoVerify_OlderThanSlotsPerEpoch(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(1, true)
att := &ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
}
ctx := context.Background()
t.Run("attestation older than slots per epoch", func(t *testing.T) {
beaconState, _ := util.DeterministicGenesisState(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + 1)
require.NoError(t, err)
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, "hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
require.ErrorContains(t, "state slot 33 > attestation slot 0 + SLOTS_PER_EPOCH 32", blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att))
})
t.Run("attestation older than slots per epoch in deneb", func(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateDeneb(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + 1)
require.NoError(t, err)
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, "hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
require.NoError(t, blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att))
})
}
func TestVerifyAttestationNoVerifySignature_OK(t *testing.T) {
// Attestation with an empty signature

View File

@@ -413,7 +413,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
}
}
func TestFuzzVerifyExit_10000(_ *testing.T) {
func TestFuzzVerifyExit_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
ve := &ethpb.SignedVoluntaryExit{}
rawVal := &ethpb.Validator{}
@@ -425,9 +425,18 @@ func TestFuzzVerifyExit_10000(_ *testing.T) {
fuzzer.Fuzz(rawVal)
fuzzer.Fuzz(fork)
fuzzer.Fuzz(&slot)
state := &ethpb.BeaconState{
Slot: slot,
Fork: fork,
GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:],
}
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
val, err := state_native.NewValidator(&ethpb.Validator{})
_ = err
err = VerifyExitAndSignature(val, slot, fork, ve, params.BeaconConfig().ZeroHash[:])
err = VerifyExitAndSignature(val, s, ve)
_ = err
}
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -50,6 +51,8 @@ func ProcessVoluntaryExits(
beaconState state.BeaconState,
exits []*ethpb.SignedVoluntaryExit,
) (state.BeaconState, error) {
maxExitEpoch, churn := v.ValidatorsMaxExitEpochAndChurn(beaconState)
var exitEpoch primitives.Epoch
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -58,11 +61,18 @@ func ProcessVoluntaryExits(
if err != nil {
return nil, err
}
if err := VerifyExitAndSignature(val, beaconState.Slot(), beaconState.Fork(), exit, beaconState.GenesisValidatorsRoot()); err != nil {
if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex)
if err != nil {
beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
if err == nil {
if exitEpoch > maxExitEpoch {
maxExitEpoch = exitEpoch
churn = 1
} else if exitEpoch == maxExitEpoch {
churn++
}
} else if !errors.Is(err, v.ValidatorAlreadyExitedErr) {
return nil, err
}
}
@@ -92,15 +102,27 @@ func ProcessVoluntaryExits(
// initiate_validator_exit(state, voluntary_exit.validator_index)
func VerifyExitAndSignature(
validator state.ReadOnlyValidator,
currentSlot primitives.Slot,
fork *ethpb.Fork,
state state.ReadOnlyBeaconState,
signed *ethpb.SignedVoluntaryExit,
genesisRoot []byte,
) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
currentSlot := state.Slot()
fork := state.Fork()
genesisRoot := state.GenesisValidatorsRoot()
// EIP-7044: Beginning in Deneb, fix the fork version to Capella.
// This allows for signed validator exits to be valid forever.
if state.Version() >= version.Deneb {
fork = &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
}
}
exit := signed.Exit
if err := verifyExitConditions(validator, currentSlot, exit); err != nil {
return err
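The EIP-7044 pinning above boils down to one branch; a minimal sketch, with illustrative fork-version ordinals and byte values rather than Prysm's config constants:

package main

import "fmt"

const (
	versionCapella = 3 // illustrative ordinals, not Prysm's runtime/version values
	versionDeneb   = 4
)

// exitSigningForkVersion picks the fork version used for the voluntary-exit
// signing domain: pinned to Capella from Deneb onward (EIP-7044).
func exitSigningForkVersion(stateVersion int, capellaFork, currentFork [4]byte) [4]byte {
	if stateVersion >= versionDeneb {
		return capellaFork
	}
	return currentFork
}

func main() {
	capella := [4]byte{0x03, 0x00, 0x00, 0x00}
	deneb := [4]byte{0x04, 0x00, 0x00, 0x00}
	fmt.Printf("%x\n", exitSigningForkVersion(versionDeneb, capella, deneb)) // 03000000
}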

View File

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -133,38 +134,38 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
}
func TestVerifyExitAndSignature(t *testing.T) {
type args struct {
currentSlot primitives.Slot
}
tests := []struct {
name string
args args
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error)
setup func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error)
wantErr string
}{
{
name: "Empty Exit",
args: args{
currentSlot: 0,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
Epoch: 0,
}
genesisRoot := [32]byte{'a'}
return &ethpb.Validator{}, &ethpb.SignedVoluntaryExit{}, fork, genesisRoot[:], nil
st := &ethpb.BeaconState{
Slot: 0,
Fork: fork,
GenesisValidatorsRoot: genesisRoot[:],
}
s, err := state_native.InitializeFromProtoUnsafePhase0(st)
if err != nil {
return nil, nil, nil, err
}
return &ethpb.Validator{}, &ethpb.SignedVoluntaryExit{}, s, nil
},
wantErr: "nil exit",
},
{
name: "Happy Path",
args: args{
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
@@ -186,15 +187,18 @@ func TestVerifyExitAndSignature(t *testing.T) {
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
return validator, signedExit, fork, bs.GenesisValidatorsRoot(), nil
if err := bs.SetFork(fork); err != nil {
return nil, nil, nil, err
}
if err := bs.SetSlot((params.BeaconConfig().SlotsPerEpoch * 2) + 1); err != nil {
return nil, nil, nil, err
}
return validator, signedExit, bs, nil
},
},
{
name: "bad signature",
args: args{
currentSlot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
},
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, *ethpb.Fork, []byte, error) {
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
@@ -215,27 +219,72 @@ func TestVerifyExitAndSignature(t *testing.T) {
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
genesisRoot := [32]byte{'a'}
if err := bs.SetFork(fork); err != nil {
return nil, nil, nil, err
}
if err := bs.SetSlot((params.BeaconConfig().SlotsPerEpoch * 2) + 1); err != nil {
return nil, nil, nil, err
}
// use wrong genesis root and don't update validator
return validator, signedExit, fork, genesisRoot[:], nil
genesisRoot := [32]byte{'a'}
if err := bs.SetGenesisValidatorsRoot(genesisRoot[:]); err != nil {
return nil, nil, nil, err
}
return validator, signedExit, bs, nil
},
wantErr: "signature did not verify",
},
{
name: "EIP-7044: deneb exits should verify with capella fork information",
setup: func() (*ethpb.Validator, *ethpb.SignedVoluntaryExit, state.ReadOnlyBeaconState, error) {
fork := &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: primitives.Epoch(2),
}
signedExit := &ethpb.SignedVoluntaryExit{
Exit: &ethpb.VoluntaryExit{
Epoch: 2,
ValidatorIndex: 0,
},
}
bs, keys := util.DeterministicGenesisState(t, 1)
bs, err := state_native.InitializeFromProtoUnsafeDeneb(&ethpb.BeaconStateDeneb{
GenesisValidatorsRoot: bs.GenesisValidatorsRoot(),
Fork: fork,
Slot: (params.BeaconConfig().SlotsPerEpoch * 2) + 1,
Validators: bs.Validators(),
})
if err != nil {
return nil, nil, nil, err
}
validator := bs.Validators()[0]
validator.ActivationEpoch = 1
err = bs.UpdateValidatorAtIndex(0, validator)
require.NoError(t, err)
sb, err := signing.ComputeDomainAndSign(bs, signedExit.Exit.Epoch, signedExit.Exit, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
signedExit.Signature = sig.Marshal()
return validator, signedExit, bs, nil
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := params.BeaconConfig().ShardCommitteePeriod
params.BeaconConfig().ShardCommitteePeriod = 0
validator, signedExit, fork, genesisRoot, err := tt.setup()
validator, signedExit, st, err := tt.setup()
require.NoError(t, err)
rvalidator, err := state_native.NewValidator(validator)
require.NoError(t, err)
err = blocks.VerifyExitAndSignature(
rvalidator,
tt.args.currentSlot,
fork,
st,
signedExit,
genesisRoot,
)
if tt.wantErr == "" {
require.NoError(t, err)

View File

@@ -141,7 +141,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// # FillFwd sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index

View File

@@ -101,8 +101,8 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
ExcessDataGas: 0,
DataGasUsed: 0,
ExcessBlobGas: 0,
BlobGasUsed: 0,
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
},

View File

@@ -110,8 +110,11 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state
isActive := helpers.IsActiveValidator(validator, currentEpoch)
belowEjectionBalance := validator.EffectiveBalance <= ejectionBal
if isActive && belowEjectionBalance {
state, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx))
if err != nil {
// It is fine to do a quadratic loop here since this should
// rarely happen.
maxExitEpoch, churn := validators.ValidatorsMaxExitEpochAndChurn(state)
state, _, err = validators.InitiateValidatorExit(ctx, state, primitives.ValidatorIndex(idx), maxExitEpoch, churn)
if err != nil && !errors.Is(err, validators.ValidatorAlreadyExitedErr) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
}

View File

@@ -12,6 +12,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
)
var (
@@ -166,14 +167,39 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
lowerBoundsSlot,
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooLateCount.Inc()
return errors.Join(ErrTooLate, attError)
}
if attTime.After(upperBounds) {
attReceivedTooEarlyCount.Inc()
return attError
}
attEpoch := slots.ToEpoch(attSlot)
if attEpoch < params.BeaconConfig().DenebForkEpoch {
if attTime.Before(lowerBounds) {
attReceivedTooLateCount.Inc()
return errors.Join(ErrTooLate, attError)
}
return nil
}
// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
currentEpoch := slots.ToEpoch(currentSlot)
prevEpoch, err := currentEpoch.SafeSub(1)
if err != nil {
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
prevEpoch = 0
}
attSlotEpoch := slots.ToEpoch(attSlot)
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
attError = fmt.Errorf(
"attestation epoch %d not within current epoch %d or previous epoch %d",
attSlot/params.BeaconConfig().SlotsPerEpoch,
currentEpoch,
prevEpoch,
)
return errors.Join(ErrTooLate, attError)
}
return nil
}
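A minimal sketch of the acceptance window the new code implements, ignoring the gossip clock-disparity bounds and assuming the mainnet values SLOTS_PER_EPOCH = 32 and ATTESTATION_PROPAGATION_SLOT_RANGE = 32 (illustrative only, not the Prysm implementation):

package main

import "fmt"

const (
	slotsPerEpoch                   = 32
	attestationPropagationSlotRange = 32
)

// acceptable reports whether an attestation slot is inside the propagation window:
// pre-Deneb it must be at most ATTESTATION_PROPAGATION_SLOT_RANGE slots old, while
// from Deneb onward (EIP-7045) it only has to fall in the current or previous epoch.
func acceptable(attSlot, currentSlot uint64, denebActive bool) bool {
	if !denebActive {
		return attSlot+attestationPropagationSlotRange >= currentSlot
	}
	attEpoch := attSlot / slotsPerEpoch
	curEpoch := currentSlot / slotsPerEpoch
	prevEpoch := curEpoch
	if curEpoch > 0 {
		prevEpoch = curEpoch - 1
	}
	return attEpoch == curEpoch || attEpoch == prevEpoch
}

func main() {
	// Slot 450 (epoch 14) is 40 slots behind slot 490 (epoch 15): rejected pre-Deneb,
	// accepted in Deneb because epoch 14 is the previous epoch.
	fmt.Println(acceptable(450, 490, false), acceptable(450, 490, true)) // false true
	// Slot 400 (epoch 12) is rejected in both cases.
	fmt.Println(acceptable(400, 490, false), acceptable(400, 490, true)) // false false
}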

View File

@@ -85,6 +85,11 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
}
func Test_ValidateAttestationTime(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 5
params.OverrideBeaconConfig(cfg)
params.SetupTestConfigCleanup(t)
if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
}
@@ -155,6 +160,39 @@ func Test_ValidateAttestationTime(t *testing.T) {
).Add(200 * time.Millisecond),
},
},
{
name: "attestation.slot < current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange - 1,
genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
},
{
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: prysmTime.Now().Add(-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
},
{
name: "attestation.slot = current_slot-ATTESTATION_PROPAGATION_SLOT_RANGE, received 200ms late in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: prysmTime.Now().Add(
-300 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
},
{
name: "attestation.slot != current epoch or previous epoch in deneb",
args: args{
attSlot: 300 - params.BeaconNetworkConfig().AttestationPropagationSlotRange,
genesisTime: prysmTime.Now().Add(
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch 14",
},
{
name: "attestation.slot is well beyond current slot",
args: args{

View File

@@ -295,43 +295,39 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
}
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
for _, e := range []primitives.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as the shuffled indices. In the current spec,
// the sorted indices are required to retrieve the proposer index. They are also
// used as a fallback when signature verification fails.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
// list with committee index and epoch number. It caches the shuffled indices for the input epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, e primitives.Epoch) error {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as the shuffled indices. In the current spec,
// the sorted indices are required to retrieve the proposer index. They are also
// used as a fallback when signature verification fails.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
return nil
}

View File

@@ -413,7 +413,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
epoch := primitives.Epoch(1)
epoch := primitives.Epoch(0)
idx := primitives.CommitteeIndex(1)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
@@ -423,6 +423,40 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)
for i := primitives.ValidatorIndex(0); uint64(i) < validatorCount; i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: 1,
}
indices[i] = i
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
validators := make([]*ethpb.Validator, 300000)
for i := 0; i < len(validators); i++ {

View File

@@ -184,7 +184,7 @@ func innerShuffleList(input []primitives.ValidatorIndex, seed [32]byte, shuffle
for {
buf[seedSize] = r
ph := hashfunc(buf[:pivotViewSize])
pivot := bytesutil.FromBytes8(ph[:8]) % listSize
pivot := binary.LittleEndian.Uint64(ph[:8]) % listSize
mirror := (pivot + 1) >> 1
binary.LittleEndian.PutUint32(buf[pivotViewSize:], uint32(pivot>>8))
source := hashfunc(buf)

View File

@@ -205,3 +205,17 @@ func ParseWeakSubjectivityInputString(wsCheckpointString string) (*v1alpha1.Chec
Root: bRoot,
}, nil
}
// MinEpochsForBlockRequests computes the number of epochs of block history that we need to maintain,
// relative to the current epoch, per the p2p specs. This is used to compute the slot where backfill is complete.
// The value is defined in:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#configuration
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33024, ~5 months)
// detailed rationale: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// TODO: ask around to understand why the FAQ section of the p2p spec shows
// multiplying and dividing the churn limit quotient by the max safety decay value of 100,
// but in the definition of the constant, the simpler equation below is used.
func MinEpochsForBlockRequests() primitives.Epoch {
return params.BeaconConfig().MinValidatorWithdrawabilityDelay +
primitives.Epoch(params.BeaconConfig().ChurnLimitQuotient/2)
}
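As a sanity check on the comment above, both forms of the formula give the same number with the mainnet values MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256, CHURN_LIMIT_QUOTIENT = 65536, and MAX_SAFETY_DECAY = 100 (a worked example, not Prysm code):

package main

import "fmt"

func main() {
	const (
		minValidatorWithdrawabilityDelay = 256
		churnLimitQuotient               = 65536
		maxSafetyDecay                   = 100
	)
	// Simple form used in the constant definition.
	fmt.Println(minValidatorWithdrawabilityDelay + churnLimitQuotient/2) // 33024
	// FAQ form: the MAX_SAFETY_DECAY factor cancels out, which is presumably why the
	// definition uses the simpler equation.
	fmt.Println(minValidatorWithdrawabilityDelay + maxSafetyDecay*churnLimitQuotient/(2*100)) // 33024
	// 33024 epochs * 32 slots * 12 seconds is about 146 days, i.e. roughly 5 months.
	fmt.Println(33024 * 32 * 12 / 86400) // 146
}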

View File

@@ -281,3 +281,19 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
return beaconState
}
func TestMinEpochsForBlockRequests(t *testing.T) {
params.SetActiveTestCleanup(t, params.MainnetConfig())
var expected primitives.Epoch = 33024
// expected value of 33024 via spec commentary:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs
// MIN_EPOCHS_FOR_BLOCK_REQUESTS is calculated using the arithmetic from compute_weak_subjectivity_period found in the weak subjectivity guide. Specifically to find this max epoch range, we use the worst case event of a very large validator size (>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT).
//
// MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
// MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
// )
//
// Where MAX_SAFETY_DECAY = 100 and thus MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024 (~5 months).
require.Equal(t, expected, helpers.MinEpochsForBlockRequests())
}

View File

@@ -16,6 +16,7 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
],

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
// ForkVersionByteLength length of fork version byte array.
@@ -56,7 +57,18 @@ const (
// ComputeDomainAndSign computes the domain and signing root and sign it using the passed in private key.
func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch, obj fssz.HashRoot, domain [4]byte, key bls.SecretKey) ([]byte, error) {
d, err := Domain(st.Fork(), epoch, domain, st.GenesisValidatorsRoot())
fork := st.Fork()
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
// This allows for signed validator exits to be valid forever.
if st.Version() >= version.Deneb && domain == params.BeaconConfig().DomainVoluntaryExit {
fork = &ethpb.Fork{
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: params.BeaconConfig().CapellaForkEpoch,
}
}
d, err := Domain(fork, epoch, domain, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
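A minimal sketch (illustrative names and values, not Prysm code) of the EIP-7044 substitution performed above: from Deneb onward the voluntary-exit domain is always derived from a Fork pinned to the Capella version, so the resulting domain, and therefore any exit signature, stays valid in every later fork:

package main

import "fmt"

type Fork struct {
	PreviousVersion [4]byte
	CurrentVersion  [4]byte
	Epoch           uint64
}

var (
	capellaForkVersion = [4]byte{0x03, 0x00, 0x00, 0x00} // mainnet CAPELLA_FORK_VERSION
	capellaForkEpoch   = uint64(194048)                  // mainnet CAPELLA_FORK_EPOCH
)

// forkForExitSigning picks the fork used to compute the DOMAIN_VOLUNTARY_EXIT signing
// domain. Before Deneb the state's own fork is used; from Deneb onward it is pinned.
func forkForExitSigning(stateFork Fork, denebOrLater bool) Fork {
	if !denebOrLater {
		return stateFork
	}
	return Fork{
		PreviousVersion: capellaForkVersion,
		CurrentVersion:  capellaForkVersion,
		Epoch:           capellaForkEpoch,
	}
}

func main() {
	// Whatever fork the state is in after Deneb, the exit domain input is identical.
	denebFork := Fork{PreviousVersion: [4]byte{0x03}, CurrentVersion: [4]byte{0x04}, Epoch: 500000} // illustrative epoch
	fmt.Printf("%+v\n", forkForExitSigning(denebFork, true))
}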

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
field_params "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -324,6 +325,10 @@ func ProcessBlockForStateRoot(
}
}
if err := VerifyBlobCommitmentCount(blk); err != nil {
return nil, err
}
randaoReveal := signed.Block().Body().RandaoReveal()
state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
if err != nil {
@@ -359,6 +364,20 @@ func ProcessBlockForStateRoot(
return state, nil
}
func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
if blk.Version() < version.Deneb {
return nil
}
kzgs, err := blk.Body().BlobKzgCommitments()
if err != nil {
return err
}
if len(kzgs) > field_params.MaxBlobsPerBlock {
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
}
return nil
}
// This calls altair block operations.
func altairOperations(
ctx context.Context,

View File

@@ -2,11 +2,13 @@ package transition_test
import (
"context"
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
field_params "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -210,3 +212,15 @@ func TestProcessBlockDifferentVersion(t *testing.T) {
_, _, err = transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
require.ErrorContains(t, "state and block are different version. 0 != 1", err)
}
func TestVerifyBlobCommitmentCount(t *testing.T) {
b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
rb, err := blocks.NewBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, transition.VerifyBlobCommitmentCount(rb))
b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, field_params.MaxBlobsPerBlock+1)}}
rb, err = blocks.NewBeaconBlock(b)
require.NoError(t, err)
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", field_params.MaxBlobsPerBlock+1), transition.VerifyBlobCommitmentCount(rb))
}

View File

@@ -15,7 +15,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -13,11 +13,34 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// ValidatorAlreadyExitedErr is an error raised when trying to process an exit of
// an already exited validator
var ValidatorAlreadyExitedErr = errors.New("validator already exited")
// ValidatorsMaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of validators exiting at that epoch (the exit churn).
func ValidatorsMaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
e := val.ExitEpoch()
if e != farFutureEpoch {
if e > maxExitEpoch {
maxExitEpoch = e
churn = 1
} else if e == maxExitEpoch {
churn++
}
}
return nil
})
_ = err
return
}
// InitiateValidatorExit takes in validator index and updates
// validator with correct voluntary exit parameters.
//
@@ -42,73 +65,43 @@ import (
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) (state.BeaconState, error) {
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, err
return nil, 0, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, nil
}
var exitEpochs []primitives.Epoch
err = s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch())
}
return nil
})
if err != nil {
return nil, err
}
exitEpochs = append(exitEpochs, helpers.ActivationExitEpoch(time.CurrentEpoch(s)))
// Obtain the exit queue epoch as the maximum number in the exit epochs array.
exitQueueEpoch := primitives.Epoch(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := uint64(0)
err = s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
if val.ExitEpoch() == exitQueueEpoch {
var mErr error
exitQueueChurn, mErr = mathutil.Add64(exitQueueChurn, 1)
if mErr != nil {
return mErr
}
}
return nil
})
if err != nil {
return nil, err
return s, validator.ExitEpoch, ValidatorAlreadyExitedErr
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, errors.Wrap(err, "could not get active validator count")
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
currentChurn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
if err != nil {
return nil, errors.Wrap(err, "could not get churn limit")
return nil, 0, errors.Wrap(err, "could not get churn limit")
}
if exitQueueChurn >= churn {
if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
if err != nil {
return nil, err
return nil, 0, err
}
}
validator.ExitEpoch = exitQueueEpoch
validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, err
return nil, 0, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, err
return nil, 0, err
}
return s, nil
return s, exitQueueEpoch, nil
}
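A worked example of the churn handling above, assuming the mainnet values MIN_PER_EPOCH_CHURN_LIMIT = 4 and CHURN_LIMIT_QUOTIENT = 65536 (a sketch, not Prysm code):

package main

import "fmt"

// churnLimit mirrors get_validator_churn_limit: max(MIN_PER_EPOCH_CHURN_LIMIT, active / CHURN_LIMIT_QUOTIENT).
func churnLimit(activeValidators uint64) uint64 {
	limit := activeValidators / 65536
	if limit < 4 {
		limit = 4
	}
	return limit
}

func main() {
	// With 500,000 active validators the per-epoch exit churn limit is 7.
	fmt.Println(churnLimit(500_000)) // 7
	// If 7 validators already have exit_epoch == exitQueueEpoch (churn >= currentChurn
	// in the code above), the next exit lands at exitQueueEpoch + 1.
}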
// SlashValidator slashes the malicious validator's balance and awards
@@ -144,8 +137,9 @@ func SlashValidator(
slashedIdx primitives.ValidatorIndex,
penaltyQuotient uint64,
proposerRewardQuotient uint64) (state.BeaconState, error) {
s, err := InitiateValidatorExit(ctx, s, slashedIdx)
if err != nil {
maxExitEpoch, churn := ValidatorsMaxExitEpochAndChurn(s)
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
if err != nil && !errors.Is(err, ValidatorAlreadyExitedErr) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
}
currentEpoch := slots.ToEpoch(s.Slot())

View File

@@ -48,8 +48,9 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, err := InitiateValidatorExit(context.Background(), state, 0)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, 0, 199, 1)
require.ErrorIs(t, err, ValidatorAlreadyExitedErr)
require.Equal(t, exitEpoch, epoch)
v, err := newState.ValidatorAtIndex(0)
require.NoError(t, err)
assert.Equal(t, exitEpoch, v.ExitEpoch, "Already exited")
@@ -66,8 +67,9 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, err := InitiateValidatorExit(context.Background(), state, idx)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1)
require.NoError(t, err)
require.Equal(t, exitedEpoch+2, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, exitedEpoch+2, v.ExitEpoch, "Exit epoch was not the highest")
@@ -85,8 +87,9 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, err := InitiateValidatorExit(context.Background(), state, idx)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4)
require.NoError(t, err)
require.Equal(t, exitedEpoch+3, epoch)
// Because of exit queue overflow,
// validator who init exited has to wait one more epoch.
@@ -106,7 +109,7 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
_, err = InitiateValidatorExit(context.Background(), state, 1)
_, _, err = InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
require.ErrorContains(t, "addition overflows", err)
}
@@ -337,3 +340,78 @@ func TestExitedValidatorIndices(t *testing.T) {
assert.DeepEqual(t, tt.wanted, exitedIndices)
}
}
func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
tests := []struct {
state *ethpb.BeaconState
wantedEpoch primitives.Epoch
wantedChurn uint64
}{
{
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
wantedEpoch: 0,
wantedChurn: 3,
},
{
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
wantedEpoch: 0,
wantedChurn: 0,
},
{
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 1,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 1,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
wantedEpoch: 1,
wantedChurn: 2,
},
}
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
epoch, churn := ValidatorsMaxExitEpochAndChurn(s)
require.Equal(t, tt.wantedEpoch, epoch)
require.Equal(t, tt.wantedChurn, churn)
}
}

View File

@@ -16,6 +16,7 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//monitoring/backup:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
],

View File

@@ -14,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/monitoring/backup"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -61,7 +62,7 @@ type ReadOnlyDatabase interface {
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
}
// NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -116,7 +117,7 @@ type HeadAccessDatabase interface {
// initialization method needed for origin checkpoint sync
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
}
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"archived_point.go",
"backfill.go",
"backup.go",
"blob.go",
"blocks.go",
@@ -50,6 +51,7 @@ go_library(
"//io/file:go_default_library",
"//monitoring/progress:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
@@ -75,6 +77,7 @@ go_test(
name = "go_default_test",
srcs = [
"archived_point_test.go",
"backfill_test.go",
"backup_test.go",
"blob_test.go",
"blocks_test.go",
@@ -110,6 +113,7 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/dbval:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",

View File

@@ -0,0 +1,39 @@
package kv
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
func (s *Store) SaveBackfillStatus(ctx context.Context, bf *dbval.BackfillStatus) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bfb, err := proto.Marshal(bf)
if err != nil {
return err
}
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillStatusKey, bfb)
})
}
func (s *Store) BackfillStatus(ctx context.Context) (*dbval.BackfillStatus, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillStatus")
defer span.End()
bf := &dbval.BackfillStatus{}
err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
bs := bucket.Get(backfillStatusKey)
if len(bs) == 0 {
return errors.Wrap(ErrNotFound, "BackfillStatus not found")
}
return proto.Unmarshal(bs, bf)
})
return bf, err
}

View File

@@ -0,0 +1,35 @@
package kv
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/proto"
)
func TestBackfillRoundtrip(t *testing.T) {
db := setupDB(t)
b := &dbval.BackfillStatus{}
b.LowSlot = 23
b.LowRoot = bytesutil.PadTo([]byte("low"), 32)
b.LowParentRoot = bytesutil.PadTo([]byte("parent"), 32)
m, err := proto.Marshal(b)
require.NoError(t, err)
ub := &dbval.BackfillStatus{}
require.NoError(t, proto.Unmarshal(m, ub))
require.Equal(t, b.LowSlot, ub.LowSlot)
require.DeepEqual(t, b.LowRoot, ub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, ub.LowParentRoot)
ctx := context.Background()
require.NoError(t, db.SaveBackfillStatus(ctx, b))
dbub, err := db.BackfillStatus(ctx)
require.NoError(t, err)
require.Equal(t, b.LowSlot, dbub.LowSlot)
require.DeepEqual(t, b.LowRoot, dbub.LowRoot)
require.DeepEqual(t, b.LowParentRoot, dbub.LowParentRoot)
}

View File

@@ -12,10 +12,31 @@ import (
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// A blob rotating key is represented as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
type blobRotatingKey []byte
// BufferPrefix returns the first 8 bytes of the rotating key.
// This represents bytes(slot_to_rotating_buffer(blob.slot)) in the rotating key.
func (rk blobRotatingKey) BufferPrefix() []byte {
return rk[0:8]
}
// Slot returns the slot encoded in the key.
func (rk blobRotatingKey) Slot() types.Slot {
slotBytes := rk[8:16]
return bytesutil.BytesToSlotBigEndian(slotBytes)
}
// BlockRoot returns the block root information from the key.
func (rk blobRotatingKey) BlockRoot() []byte {
return rk[16:]
}
// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
@@ -23,7 +44,7 @@ import (
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot.
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
defer span.End()
@@ -38,11 +59,12 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) e
if err != nil {
return err
}
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
newKey := blobSidecarKey(scs[0])
rotatingBufferPrefix := newKey[0:8]
var replacingKey []byte
rotatingBufferPrefix := newKey.BufferPrefix()
var replacingKey blobRotatingKey
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
if len(k) != 0 {
replacingKey = k
@@ -52,16 +74,33 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) e
// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
// store the blob by key and exit early.
if len(replacingKey) != 0 {
if err := bkt.Delete(replacingKey); err != nil {
log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
oldSlot := replacingKey.Slot()
oldEpoch := slots.ToEpoch(oldSlot)
// The blob we are replacing is too old, so we delete it.
if slots.ToEpoch(scs[0].Slot) >= oldEpoch.Add(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)) {
if err := bkt.Delete(replacingKey); err != nil {
log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
}
} else {
// Otherwise, we need to merge the new blob with the old blob.
enc := bkt.Get(replacingKey)
sc := &ethpb.BlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return err
}
sc.Sidecars = append(sc.Sidecars, scs...)
sortSideCars(sc.Sidecars)
encodedBlobSidecar, err = encode(ctx, &ethpb.BlobSidecars{Sidecars: sc.Sidecars})
if err != nil {
return err
}
}
}
return bkt.Put(newKey, encodedBlobSidecar)
})
}
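A worked example of the rotating-buffer arithmetic described above, assuming the mainnet values MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096 and SLOTS_PER_EPOCH = 32, so maxSlots = 131072. The exact byte encoding of the prefix is illustrative (Prysm's slotKey and SlotToBytesBigEndian handle it), but the 8 + 8 + 32 = 48-byte layout matches the key described in the comment:

package main

import (
	"encoding/binary"
	"fmt"
)

const maxSlotsToPersistBlobs = 4096 * 32 // 131072

// rotatingKey builds bytes(slot % maxSlots) ++ bytes(slot) ++ blockRoot.
func rotatingKey(slot uint64, blockRoot [32]byte) []byte {
	key := make([]byte, 0, 48)
	key = binary.BigEndian.AppendUint64(key, slot%maxSlotsToPersistBlobs)
	key = binary.BigEndian.AppendUint64(key, slot)
	return append(key, blockRoot[:]...)
}

func main() {
	var root [32]byte
	k := rotatingKey(200000, root)
	// Slot 200000 occupies buffer position 200000 % 131072 = 68928; a blob at slot
	// 331072 (one full rotation later) maps to the same prefix, and since it is at
	// least MinEpochsForBlobsSidecarsRequest epochs newer, the old entry is deleted.
	fmt.Println(binary.BigEndian.Uint64(k[:8]), binary.BigEndian.Uint64(k[8:16]), len(k)) // 68928 200000 48
}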
// verifySideCars ensures that all sidecars have the same slot, parent root, block root, and proposer index.
// It also ensures that indices are sequential and start at 0 and no more than MAX_BLOB_EPOCHS.
// verifySideCars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and that the number of sidecars does not exceed the maximum allowed per block.
func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
if len(scs) == 0 {
return errors.New("nil or empty blob sidecars")
@@ -75,7 +114,7 @@ func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
r := scs[0].BlockRoot
p := scs[0].ProposerIndex
for i, sc := range scs {
for _, sc := range scs {
if sc.Slot != sl {
return fmt.Errorf("sidecar slot mismatch: %d != %d", sc.Slot, sl)
}
@@ -88,9 +127,6 @@ func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
if sc.ProposerIndex != p {
return fmt.Errorf("sidecar proposer index mismatch: %d != %d", sc.ProposerIndex, p)
}
if sc.Index != uint64(i) {
return fmt.Errorf("sidecar index mismatch: %d != %d", sc.Index, i)
}
}
return nil
}
@@ -210,7 +246,7 @@ func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte)
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
func blobSidecarKey(blob *ethpb.BlobSidecar) blobRotatingKey {
key := slotKey(blob.Slot)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
key = append(key, blob.BlockRoot...)
@@ -226,11 +262,10 @@ func slotKey(slot types.Slot) []byte {
func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(epochsForBlobSidecarsRequestBucket)
k := []byte("epoch-key")
v := b.Get(k)
b := tx.Bucket(chainMetadataBucket)
v := b.Get(blobRetentionEpochsKey)
if v == nil {
if err := b.Put(k, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
return err
}
return nil

View File

@@ -10,6 +10,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
@@ -62,11 +63,22 @@ func TestStore_BlobSidecars(t *testing.T) {
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root (max)", func(t *testing.T) {
t.Run("save and retrieve by root (max), per batch", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root, max and individually", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
}
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
@@ -116,6 +128,17 @@ func TestStore_BlobSidecars(t *testing.T) {
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by slot, max and individually", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
}
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
@@ -157,11 +180,25 @@ func TestStore_BlobSidecars(t *testing.T) {
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("saving a new blob for rotation", func(t *testing.T) {
t.Run("saving blob different times", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for i := 0; i < fieldparams.MaxBlobsPerBlock; i++ {
scs[i].Slot = primitives.Slot(i)
scs[i].BlockRoot = bytesutil.PadTo([]byte{byte(i)}, 32)
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{scs[i]}))
br := bytesutil.ToBytes32(scs[i].BlockRoot)
saved, err := db.BlobSidecarsByRoot(ctx, br)
require.NoError(t, err)
require.NoError(t, equalBlobSlices([]*ethpb.BlobSidecar{scs[i]}, saved))
}
})
t.Run("saving a new blob for rotation (batch)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
@@ -181,6 +218,82 @@ func TestStore_BlobSidecars(t *testing.T) {
require.NoError(t, err)
require.NoError(t, equalBlobSlices(newScs, got))
})
t.Run("save multiple blobs after new rotation (individually)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range scs {
sc.Slot = sc.Slot + newRetentionSlot
}
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
}
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save multiple blobs after new rotation (batch then individually)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range scs {
sc.Slot = sc.Slot + newRetentionSlot
}
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
}
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save multiple blobs after new rotation (individually then batch)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range scs {
sc.Slot = sc.Slot + newRetentionSlot
}
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
}
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
@@ -227,7 +340,6 @@ func TestStore_verifySideCars(t *testing.T) {
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
{name: "invalid side index", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 0}}, error: "sidecar index mismatch: 0 != 1"},
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
}
for _, tt := range tests {
@@ -305,3 +417,14 @@ func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
params.OverrideBeaconNetworkConfig(nConfig)
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
}
func TestBlobRotatingKey(t *testing.T) {
k := blobSidecarKey(&ethpb.BlobSidecar{
Slot: 1,
BlockRoot: []byte{2},
})
require.Equal(t, types.Slot(1), k.Slot())
require.DeepEqual(t, []byte{2}, k.BlockRoot())
require.DeepEqual(t, slotKey(types.Slot(1)), k.BufferPrefix())
}

View File

@@ -70,25 +70,6 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
return root, err
}
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
defer span.End()
var root [32]byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blocksBucket)
rootSlice := bkt.Get(backfillBlockRootKey)
if len(rootSlice) == 0 {
return ErrNotFoundBackfillBlockRoot
}
root = bytesutil.ToBytes32(rootSlice)
return nil
})
return root, err
}
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
@@ -417,17 +398,6 @@ func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32
})
}
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
// the node was initialized via checkpoint sync.
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(blocksBucket)
return bucket.Put(backfillBlockRootKey, blockRoot[:])
})
}
// HighestRootsBelowSlot returns roots from the database slot index from the highest slot below the input slot.
// The slot value at the beginning of the return list is the slot where the roots were found. This is helpful so that
// calling code can make decisions based on the slot without resolving the blocks to discover their slot (for instance

View File

@@ -126,23 +126,6 @@ var blockTests = []struct {
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
db := setupDB(t)
ctx := context.Background()
_, err := db.BackfillBlockRoot(ctx)
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
var expected [32]byte
copy(expected[:], []byte{0x23})
err = db.SaveBackfillBlockRoot(ctx, expected)
require.NoError(t, err)
actual, err := db.BackfillBlockRoot(ctx)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
BlockCacheSize = 1
slot := primitives.Slot(20)

View File

@@ -131,7 +131,6 @@ var Buckets = [][]byte{
registrationBucket,
blobsBucket,
epochsForBlobSidecarsRequestBucket,
}
// NewKVStore initializes a new boltDB key-value store at the directory

View File

@@ -8,6 +8,7 @@ package kv
// corresponding attestations.
var (
attestationsBucket = []byte("attestations")
blobsBucket = []byte("blobs")
blocksBucket = []byte("blocks")
stateBucket = []byte("state")
stateSummaryBucket = []byte("state-summary")
@@ -46,7 +47,10 @@ var (
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
blobsBucket = []byte("blobs")
// blobRetentionEpochsKey determines the size of the blob circular buffer and how the keys in that buffer are
// determined. If this value changes, the existing data is invalidated, so storing it in the db
// allows us to assert at runtime that the db state is still consistent with the runtime state.
blobRetentionEpochsKey = []byte("blob-retention-epochs")
// Below keys are used to identify objects are to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -61,8 +65,8 @@ var (
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
backfillBlockRootKey = []byte("backfill-block-root")
// tracking data about an ongoing backfill
backfillStatusKey = []byte("backfill-status")
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
lastArchivedIndexKey = []byte("last-archived")
@@ -74,7 +78,4 @@ var (
// Migrations
migrationsBucket = []byte("migrations")
// Stores how long to keep the blob sidecars for.
epochsForBlobSidecarsRequestBucket = []byte("epochs-for-blob-sidecars-request")
)

View File

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/proto/dbval"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -17,18 +18,6 @@ import (
// syncing, using the provided values as their point of origin. This is an alternative
// to syncing from genesis, and should only be run on an empty database.
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
genesisRoot, err := s.GenesisBlockRoot(ctx)
if err != nil {
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
}
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
}
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
if err != nil {
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
}
cf, err := detect.FromState(serState)
if err != nil {
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
@@ -50,11 +39,24 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
}
blk := wblk.Block()
// save block
blockRoot, err := blk.HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
}
pr := blk.ParentRoot()
bf := &dbval.BackfillStatus{
LowSlot: uint64(wblk.Block().Slot()),
LowRoot: blockRoot[:],
LowParentRoot: pr[:],
OriginRoot: blockRoot[:],
OriginSlot: uint64(wblk.Block().Slot()),
}
if err = s.SaveBackfillStatus(ctx, bf); err != nil {
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync.")
}
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
if err := s.SaveBlock(ctx, wblk); err != nil {
return errors.Wrap(err, "could not save checkpoint block")

View File

@@ -71,6 +71,7 @@ go_library(
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -28,6 +28,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/proto"
)
var (
@@ -54,6 +55,8 @@ const (
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
ForkchoiceUpdatedMethodV2 = "engine_forkchoiceUpdatedV2"
// ForkchoiceUpdatedMethodV3 v3 request string for JSON-RPC.
ForkchoiceUpdatedMethodV3 = "engine_forkchoiceUpdatedV3"
// GetPayloadMethod v1 request string for JSON-RPC.
GetPayloadMethod = "engine_getPayloadV1"
// GetPayloadMethodV2 v2 request string for JSON-RPC.
@@ -78,8 +81,9 @@ const (
// ForkchoiceUpdatedResponse is the response kind received by the
// engine_forkchoiceUpdatedV1 endpoint.
type ForkchoiceUpdatedResponse struct {
Status *pb.PayloadStatus `json:"payloadStatus"`
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
Status *pb.PayloadStatus `json:"payloadStatus"`
PayloadId *pb.PayloadIDBytes `json:"payloadId"`
ValidationError string `json:"validationError"`
}
// ExecutionPayloadReconstructor defines a service that can reconstruct a full beacon
@@ -97,11 +101,11 @@ type ExecutionPayloadReconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
@@ -112,7 +116,7 @@ type EngineCaller interface {
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
@@ -149,14 +153,16 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes)
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes, parentBlockRoot)
if err != nil {
return nil, handleRPCError(err)
}
default:
return nil, errors.New("unknown execution data type")
}
if result.ValidationError != "" {
log.WithError(errors.New(result.ValidationError)).Error("Got a validation error in newPayload")
}
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
return nil, ErrInvalidBlockHashPayloadStatus
@@ -200,7 +206,7 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Capella, version.Deneb:
case version.Capella:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
@@ -209,6 +215,15 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Deneb:
a, err := attrs.PbV3()
if err != nil {
return nil, nil, err
}
err = s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethodV3, state, a)
if err != nil {
return nil, nil, handleRPCError(err)
}
default:
return nil, nil, fmt.Errorf("unknown payload attribute version: %v", attrs.Version())
}
@@ -216,6 +231,9 @@ func (s *Service) ForkchoiceUpdated(
if result.Status == nil {
return nil, nil, ErrNilResponse
}
if result.ValidationError != "" {
log.WithError(errors.New(result.ValidationError)).Error("Got a validation error in forkChoiceUpdated")
}
resp := result.Status
switch resp.Status {
case pb.PayloadStatus_SYNCING:
@@ -231,7 +249,7 @@ func (s *Service) ForkchoiceUpdated(
// GetPayload calls the engine_getPayloadVX method via JSON-RPC.
// It returns the execution data as well as the blobs bundle.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayload")
defer span.End()
start := time.Now()
@@ -247,38 +265,38 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
result := &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, err
return nil, nil, false, err
}
return ed, result.BlobsBundle, nil
return ed, result.BlobsBundle, result.ShouldOverrideBuilder, nil
}
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapellaWithValue{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, err
return nil, nil, false, err
}
return ed, nil, nil
return ed, nil, false, nil
}
result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
return nil, nil, false, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayload(result)
if err != nil {
return nil, nil, err
return nil, nil, false, err
}
return ed, nil, nil
return ed, nil, false, nil
}
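A hypothetical caller sketch (illustrative names, not Prysm's proposer code) of how the new boolean returned by GetPayload is meant to be used: engine_getPayloadV3's shouldOverrideBuilder field is a hint from the execution client that the locally built payload should be preferred over an external builder payload:

package main

import "fmt"

// localPayloadResult stands in for the (ExecutionData, *BlobsBundle, bool, error) tuple
// now returned by GetPayload.
type localPayloadResult struct {
	payload         string
	overrideBuilder bool
}

// choosePayload prefers the builder payload only when the engine did not ask us to
// override it and a builder payload is actually available.
func choosePayload(local localPayloadResult, builderPayload string, builderAvailable bool) string {
	if local.overrideBuilder || !builderAvailable {
		return local.payload
	}
	return builderPayload
}

func main() {
	fmt.Println(choosePayload(localPayloadResult{"local", true}, "builder", true))  // local
	fmt.Println(choosePayload(localPayloadResult{"local", false}, "builder", true)) // builder
}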
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
@@ -576,7 +594,10 @@ func (s *Service) ReconstructFullBlock(
// If the payload header has a block hash of 0x0, it means we are pre-merge and should
// simply return the block with an empty execution payload.
if bytes.Equal(header.BlockHash(), params.BeaconConfig().ZeroHash[:]) {
payload := buildEmptyExecutionPayload()
payload, err := buildEmptyExecutionPayload(blindedBlock.Version())
if err != nil {
return nil, err
}
return blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlock, payload)
}
@@ -635,7 +656,11 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
// For blocks that are pre-merge we simply reconstruct them via an empty
// execution payload.
for _, realIdx := range zeroExecPayloads {
payload := buildEmptyExecutionPayload()
bblock := blindedBlocks[realIdx]
payload, err := buildEmptyExecutionPayload(bblock.Version())
if err != nil {
return nil, err
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload)
if err != nil {
return nil, err
@@ -671,7 +696,7 @@ func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executio
}
executionBlock.Version = version
return fullPayloadFromExecutionBlock(header, executionBlock)
return fullPayloadFromExecutionBlock(version, header, executionBlock)
}
func (s *Service) retrievePayloadsFromExecutionHashes(
@@ -699,16 +724,17 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
// blinded block.
for sliceIdx, realIdx := range validExecPayloads {
var payload interfaces.ExecutionData
bblock := blindedBlocks[realIdx]
if features.Get().EnableOptionalEngineMethods {
b := payloadBodies[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
header, err := bblock.Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromPayloadBody(header, b, blindedBlocks[realIdx].Version())
payload, err = fullPayloadFromPayloadBody(header, b, bblock.Version())
if err != nil {
return nil, err
}
@@ -717,16 +743,16 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
if b == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
}
header, err := blindedBlocks[realIdx].Block().Body().Execution()
header, err := bblock.Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromExecutionBlock(header, b)
payload, err = fullPayloadFromExecutionBlock(bblock.Version(), header, b)
if err != nil {
return nil, err
}
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(blindedBlocks[realIdx], payload.Proto())
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(bblock, payload.Proto())
if err != nil {
return nil, err
}
@@ -736,7 +762,7 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
}
func fullPayloadFromExecutionBlock(
header interfaces.ExecutionData, block *pb.ExecutionBlock,
blockVersion int, header interfaces.ExecutionData, block *pb.ExecutionBlock,
) (interfaces.ExecutionData, error) {
if header.IsNil() || block == nil {
return nil, errors.New("execution block and header cannot be nil")
@@ -759,7 +785,7 @@ func fullPayloadFromExecutionBlock(
txs[i] = txBin
}
switch block.Version {
switch blockVersion {
case version.Bellatrix:
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
@@ -796,13 +822,13 @@ func fullPayloadFromExecutionBlock(
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
edg, err := header.ExcessDataGas()
ebg, err := header.ExcessBlobGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessDataGas attribute from excution payload header")
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from excution payload header")
}
dgu, err := header.DataGasUsed()
bgu, err := header.BlobGasUsed()
if err != nil {
return nil, errors.Wrap(err, "unable to extract DataGasUsed attribute from excution payload header")
return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from excution payload header")
}
return blocks.WrappedExecutionPayloadDeneb(
&pb.ExecutionPayloadDeneb{
@@ -821,8 +847,8 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
ExcessDataGas: edg,
DataGasUsed: dgu,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
@@ -836,7 +862,8 @@ func fullPayloadFromPayloadBody(
return nil, errors.New("execution block and header cannot be nil")
}
if bVersion == version.Bellatrix {
switch bVersion {
case version.Bellatrix:
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -853,24 +880,56 @@ func fullPayloadFromPayloadBody(
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
})
case version.Capella:
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
ebg, err := header.ExcessBlobGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from excution payload header")
}
bgu, err := header.BlobGasUsed()
if err != nil {
return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from excution payload header")
}
return blocks.WrappedExecutionPayloadDeneb(
&pb.ExecutionPayloadDeneb{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: body.Transactions,
Withdrawals: body.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}
// Handles errors received from the RPC server according to the specification.
@@ -958,18 +1017,51 @@ func tDStringToUint256(td string) (*uint256.Int, error) {
return i, nil
}
func buildEmptyExecutionPayload() *pb.ExecutionPayload {
return &pb.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
ExtraData: make([]byte, 0),
func buildEmptyExecutionPayload(v int) (proto.Message, error) {
switch v {
case version.Bellatrix:
return &pb.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
ExtraData: make([]byte, 0),
}, nil
case version.Capella:
return &pb.ExecutionPayloadCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
ExtraData: make([]byte, 0),
Withdrawals: make([]*pb.Withdrawal, 0),
}, nil
case version.Deneb:
return &pb.ExecutionPayloadDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
ExtraData: make([]byte, 0),
Withdrawals: make([]*pb.Withdrawal, 0),
}, nil
default:
return nil, errors.Wrapf(ErrUnsupportedVersion, "version=%s", version.String(v))
}
}
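A hedged, test-style sketch of the version dispatch above (not part of this diff); it is a same-package sketch that relies only on buildEmptyExecutionPayload, the version constants, and the test imports already used elsewhere in this package (testing, testing/require):
func TestBuildEmptyExecutionPayload_KnownVersions(t *testing.T) {
    for _, v := range []int{version.Bellatrix, version.Capella, version.Deneb} {
        msg, err := buildEmptyExecutionPayload(v)
        require.NoError(t, err)
        require.NotNil(t, msg)
    }
    // Pre-Bellatrix forks carry no execution payload, so the sentinel error is returned.
    _, err := buildEmptyExecutionPayload(version.Altair)
    require.NotNil(t, err)
}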

View File

@@ -75,8 +75,9 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, _, err := srv.GetPayload(ctx, payloadId, 1)
resp, _, override, err := srv.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
require.Equal(t, false, override)
resPb, err := resp.PbBellatrix()
require.NoError(t, err)
require.DeepEqual(t, want, resPb)
@@ -85,8 +86,9 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.ExecutionPayloadCapellaWithValue)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, _, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, override, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, false, override)
resPb, err := resp.PbCapella()
require.NoError(t, err)
require.DeepEqual(t, want, resPb)
@@ -118,7 +120,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayload(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -129,7 +131,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -204,8 +206,9 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, _, err := client.GetPayload(ctx, payloadId, 1)
resp, _, override, err := client.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
require.Equal(t, false, override)
pb, err := resp.PbBellatrix()
require.NoError(t, err)
require.DeepEqual(t, want, pb)
@@ -248,8 +251,9 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, _, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, override, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, false, override)
pb, err := resp.PbCapella()
require.NoError(t, err)
require.DeepEqual(t, want.ExecutionPayload.BlockHash.Bytes(), pb.BlockHash)
@@ -301,12 +305,13 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, blobsBundle, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
resp, blobsBundle, override, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
g, err := resp.ExcessDataGas()
require.Equal(t, true, override)
g, err := resp.ExcessBlobGas()
require.NoError(t, err)
require.DeepEqual(t, uint64(3), g)
g, err = resp.DataGasUsed()
g, err = resp.BlobGasUsed()
require.NoError(t, err)
require.DeepEqual(t, uint64(2), g)
@@ -467,7 +472,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -481,7 +486,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -495,7 +500,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -509,7 +514,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -523,7 +528,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -537,7 +542,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -551,7 +556,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -565,7 +570,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -579,7 +584,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -593,7 +598,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -607,7 +612,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -621,7 +626,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -635,7 +640,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -1433,8 +1438,8 @@ func fixtures() map[string]interface{} {
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
DataGasUsed: 2,
ExcessDataGas: 3,
BlobGasUsed: 2,
ExcessBlobGas: 3,
}
hexUint := hexutil.Uint64(1)
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
@@ -1456,9 +1461,10 @@ func fixtures() map[string]interface{} {
},
BlockValue: "0x11fffffffff",
}
dgu := hexutil.Uint64(2)
edg := hexutil.Uint64(3)
bgu := hexutil.Uint64(2)
ebg := hexutil.Uint64(3)
executionPayloadWithValueFixtureDeneb := &pb.GetPayloadV3ResponseJson{
ShouldOverrideBuilder: true,
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
@@ -1474,8 +1480,8 @@ func fixtures() map[string]interface{} {
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
DataGasUsed: &dgu,
ExcessDataGas: &edg,
BlobGasUsed: &bgu,
ExcessBlobGas: &ebg,
},
BlockValue: "0x11fffffffff",
BlobsBundle: &pb.BlobBundleJSON{
@@ -1598,8 +1604,9 @@ func fixtures() map[string]interface{} {
func Test_fullPayloadFromExecutionBlock(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeader
block *pb.ExecutionBlock
header *pb.ExecutionPayloadHeader
block *pb.ExecutionBlock
version int
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
@@ -1615,9 +1622,9 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Bellatrix,
Hash: common.BytesToHash([]byte("bar")),
Hash: common.BytesToHash([]byte("bar")),
},
version: version.Bellatrix,
},
err: "does not match execution block hash",
},
@@ -1628,9 +1635,9 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Bellatrix,
Hash: wantedHash,
Hash: wantedHash,
},
version: version.Bellatrix,
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
@@ -1646,7 +1653,7 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeader(tt.args.header)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
@@ -1658,8 +1665,9 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderCapella
block *pb.ExecutionBlock
header *pb.ExecutionPayloadHeaderCapella
block *pb.ExecutionBlock
version int
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
@@ -1675,9 +1683,9 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: common.BytesToHash([]byte("bar")),
Hash: common.BytesToHash([]byte("bar")),
},
version: version.Capella,
},
err: "does not match execution block hash",
},
@@ -1688,9 +1696,9 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: wantedHash,
Hash: wantedHash,
},
version: version.Capella,
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
@@ -1706,7 +1714,7 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
@@ -1718,8 +1726,9 @@ func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderDeneb
block *pb.ExecutionBlock
header *pb.ExecutionPayloadHeaderDeneb
block *pb.ExecutionBlock
version int
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
@@ -1735,9 +1744,9 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: common.BytesToHash([]byte("bar")),
Hash: common.BytesToHash([]byte("bar")),
},
version: version.Deneb,
},
err: "does not match execution block hash",
},
@@ -1748,9 +1757,9 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: wantedHash,
Hash: wantedHash,
},
version: version.Deneb,
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
@@ -1766,7 +1775,7 @@ func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
got, err := fullPayloadFromExecutionBlock(tt.args.version, wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {

View File

@@ -36,4 +36,6 @@ var (
ErrNilResponse = errors.New("nil response")
// ErrRequestTooLarge when the request is too large
ErrRequestTooLarge = errors.New("request too large")
// ErrUnsupportedVersion represents a case where a payload is requested for a block type that doesn't have a known mapping.
ErrUnsupportedVersion = errors.New("unknown ExecutionPayload schema for block version")
)
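Since buildEmptyExecutionPayload wraps this sentinel via errors.Wrapf, callers can still branch on it with errors.Is. A brief illustrative sketch (not from this diff), assuming the package's errors import exposes errors.Is and with version.Altair standing in for any pre-Bellatrix fork:
if _, err := buildEmptyExecutionPayload(version.Altair); errors.Is(err, ErrUnsupportedVersion) {
    // Treat this as a configuration/programming error rather than a transient engine failure.
}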

View File

@@ -69,12 +69,12 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
if err != nil {
return err
}
for _, filterLog := range logs {
for i, filterLog := range logs {
// ignore logs that are not of the required block number
if filterLog.BlockNumber != blkNum.Uint64() {
continue
}
if err := s.ProcessLog(ctx, filterLog); err != nil {
if err := s.ProcessLog(ctx, &logs[i]); err != nil {
return errors.Wrap(err, "could not process log")
}
}
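The switch to &logs[i] (and to a *gethtypes.Log parameter below) avoids copying the log struct on every iteration and avoids taking the address of the per-iteration range variable. A small self-contained sketch of the pattern (illustrative, not Prysm code):
package main

import "fmt"

type logEntry struct{ BlockNumber uint64 }

// process takes a pointer, so it reads the element in place without a copy.
func process(l *logEntry) { l.BlockNumber++ }

func main() {
    logs := []logEntry{{BlockNumber: 1}, {BlockNumber: 2}}
    for i := range logs {
        // &logs[i] points at the slice element itself; any mutation is visible afterwards.
        process(&logs[i])
    }
    fmt.Println(logs) // prints [{2} {3}]
}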
@@ -88,7 +88,7 @@ func (s *Service) ProcessETH1Block(ctx context.Context, blkNum *big.Int) error {
// ProcessLog is the main method which handles the processing of all
// logs from the deposit contract on the eth1 chain.
func (s *Service) ProcessLog(ctx context.Context, depositLog gethtypes.Log) error {
func (s *Service) ProcessLog(ctx context.Context, depositLog *gethtypes.Log) error {
s.processingLock.RLock()
defer s.processingLock.RUnlock()
// Process logs according to their event signature.
@@ -108,7 +108,7 @@ func (s *Service) ProcessLog(ctx context.Context, depositLog gethtypes.Log) erro
// ProcessDepositLog processes the log which had been received from
// the eth1 chain by trying to ascertain which participant deposited
// in the contract.
func (s *Service) ProcessDepositLog(ctx context.Context, depositLog gethtypes.Log) error {
func (s *Service) ProcessDepositLog(ctx context.Context, depositLog *gethtypes.Log) error {
pubkey, withdrawalCredentials, amount, signature, merkleTreeIndex, err := contracts.UnpackDepositLogData(depositLog.Data)
if err != nil {
return errors.Wrap(err, "Could not unpack log")
@@ -406,7 +406,7 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
lastReqBlock := s.latestEth1Data.LastRequestedBlock
s.latestEth1DataLock.RUnlock()
for _, filterLog := range logs {
for i, filterLog := range logs {
if filterLog.BlockNumber > currentBlockNum {
if err := s.checkHeaderRange(ctx, currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil {
return 0, 0, err
@@ -417,7 +417,7 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
s.latestEth1DataLock.Unlock()
currentBlockNum = filterLog.BlockNumber
}
if err := s.ProcessLog(ctx, filterLog); err != nil {
if err := s.ProcessLog(ctx, &logs[i]); err != nil {
// In the event the execution client gives us a garbled/bad log
// we reset the last requested block to the previous valid block range. This
// prevents the beacon from advancing processing of logs to another range

View File

@@ -79,7 +79,7 @@ func TestProcessDepositLog_OK(t *testing.T) {
t.Fatal("no logs")
}
err = web3Service.ProcessLog(context.Background(), logs[0])
err = web3Service.ProcessLog(context.Background(), &logs[0])
require.NoError(t, err)
require.LogsDoNotContain(t, hook, "Could not unpack log")
@@ -149,9 +149,9 @@ func TestProcessDepositLog_InsertsPendingDeposit(t *testing.T) {
web3Service.chainStartData.Chainstarted = true
err = web3Service.ProcessDepositLog(context.Background(), logs[0])
err = web3Service.ProcessDepositLog(context.Background(), &logs[0])
require.NoError(t, err)
err = web3Service.ProcessDepositLog(context.Background(), logs[1])
err = web3Service.ProcessDepositLog(context.Background(), &logs[1])
require.NoError(t, err)
pendingDeposits := web3Service.cfg.depositCache.PendingDeposits(context.Background(), nil /*blockNum*/)
@@ -272,8 +272,8 @@ func TestProcessETH2GenesisLog_8DuplicatePubkeys(t *testing.T) {
logs, err := testAcc.Backend.FilterLogs(web3Service.ctx, query)
require.NoError(t, err, "Unable to retrieve logs")
for _, log := range logs {
err = web3Service.ProcessLog(context.Background(), log)
for i := range logs {
err = web3Service.ProcessLog(context.Background(), &logs[i])
require.NoError(t, err)
}
assert.Equal(t, false, web3Service.chainStartData.Chainstarted, "Genesis has been triggered despite being 8 duplicate keys")
@@ -351,8 +351,8 @@ func TestProcessETH2GenesisLog(t *testing.T) {
stateSub := web3Service.cfg.stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for _, log := range logs {
err = web3Service.ProcessLog(context.Background(), log)
for i := range logs {
err = web3Service.ProcessLog(context.Background(), &logs[i])
require.NoError(t, err)
}

View File

@@ -39,13 +39,14 @@ type EngineClient struct {
NumReconstructedPayloads uint64
TerminalBlockHash []byte
TerminalBlockHashExists bool
BuilderOverride bool
OverrideValidHash [32]byte
BlockValue uint64
BlobsBundle *pb.BlobsBundle
}
// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ [][32]byte) ([]byte, error) {
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ []common.Hash, _ *common.Hash) ([]byte, error) {
return e.NewPayloadResp, e.ErrNewPayload
}
@@ -60,26 +61,26 @@ func (e *EngineClient) ForkchoiceUpdated(
}
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, bool, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, err
return nil, nil, false, err
}
return ed, e.BlobsBundle, nil
return ed, e.BlobsBundle, e.BuilderOverride, nil
}
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, err
return nil, nil, false, err
}
return ed, nil, nil
return ed, nil, e.BuilderOverride, nil
}
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
if err != nil {
return nil, nil, err
return nil, nil, false, err
}
return p, nil, e.ErrGetPayload
return p, nil, e.BuilderOverride, e.ErrGetPayload
}
// ExchangeTransitionConfiguration --

View File

@@ -205,18 +205,30 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
bfs := backfill.NewStatus(beacon.db)
if err := bfs.Reload(ctx); err != nil {
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
return nil, err
}
bfs, err := backfill.NewUpdater(ctx, beacon.db)
if err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}
bf, err := backfill.NewService(ctx, bfs, beacon.clockWaiter, beacon.fetchP2P())
if err != nil {
return nil, errors.Wrap(err, "error initializing backfill service")
}
if err := beacon.services.RegisterService(bf); err != nil {
return nil, errors.Wrap(err, "error registering backfill service")
}
log.Debugln("Starting State Gen")
if err := beacon.startStateGen(ctx, bfs, beacon.forkChoicer); err != nil {
return nil, err
}
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
if errors.Is(err, stategen.ErrNoGenesisBlock) {
log.Errorf("No genesis block/state is found. Prysm only provides a mainnet genesis "+
"state bundled in the application. You must provide the --%s or --%s flag to load "+
"a genesis block/state for this network.", "genesis-state", "genesis-beacon-api-url")
}
return nil, err
}
@@ -496,8 +508,8 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.StatusUpdater, fc forkchoice.ForkChoicer) error {
opts := []stategen.StateGenOption{stategen.WithAvailableBlocker(bfs)}
sg := stategen.New(b.db, fc, opts...)
cp, err := b.db.FinalizedCheckpoint(ctx)

View File

@@ -34,14 +34,17 @@ func (s *Service) prepareForkChoiceAtts() {
ticker := slots.NewSlotTickerWithIntervals(time.Unix(int64(s.genesisTime), 0), intervals[:])
for {
select {
case <-ticker.C():
case slotInterval := <-ticker.C():
t := time.Now()
if err := s.batchForkChoiceAtts(s.ctx); err != nil {
log.WithError(err).Error("Could not prepare attestations for fork choice")
}
if slots.TimeIntoSlot(s.genesisTime) < intervals[1] {
batchForkChoiceAttsT1.Observe(float64(time.Since(t).Milliseconds()))
} else if slots.TimeIntoSlot(s.genesisTime) < intervals[2] {
switch slotInterval.Interval {
case 0:
duration := time.Since(t)
log.WithField("Duration", duration).Debug("aggregated unaggregated attestations")
batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
case 1:
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))
}
case <-s.ctx.Done():

View File

@@ -90,7 +90,7 @@ func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slo
}
continue
}
if err = blocks.VerifyExitAndSignature(validator, state.Slot(), state.Fork(), exit, state.GenesisValidatorsRoot()); err != nil {
if err = blocks.VerifyExitAndSignature(validator, state, exit); err != nil {
logrus.WithError(err).Warning("removing invalid exit from pool")
p.lock.RUnlock()
// MarkIncluded removes the invalid exit from the pool

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"addr_factory.go",
"assigner.go",
"broadcaster.go",
"config.go",
"connection_gater.go",

View File

@@ -0,0 +1 @@
package p2p

View File

@@ -121,6 +121,9 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultAttesterSlashingTopicParams(), nil
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
return defaultBlsToExecutionChangeTopicParams(), nil
case strings.Contains(topic, GossipBlobSidecarMessage):
// TODO(Deneb): Using the default block topic scoring for now; this should be updated with blob-specific parameters.
return defaultBlockTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}

View File

@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"assigner.go",
"log.go",
"status.go",
],
@@ -14,6 +15,7 @@ go_library(
deps = [
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -28,6 +30,7 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

View File

@@ -0,0 +1,72 @@
package peers
import (
"context"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/sirupsen/logrus"
)
// handshakePollingInterval is a polling interval for checking the number of received handshakes.
var handshakePollingInterval = 5 * time.Second
// NewAssigner returns an Assigner that selects peers from the given Status, using the
// provided max and the finalized epoch that candidate peers must agree on.
func NewAssigner(ctx context.Context, s *Status, max int, finalized primitives.Epoch) *Assigner {
return &Assigner{
ctx: ctx,
ps: s,
max: max,
finalized: finalized,
}
}
// Assigner hands out sets of suitable peers for sync components to make requests against.
type Assigner struct {
sync.Mutex
ctx context.Context
ps *Status
max int
finalized primitives.Epoch
}
// ErrInsufficientSuitable is returned when fewer suitable peers are available than the required minimum.
var ErrInsufficientSuitable = errors.New("no suitable peers")
// freshPeers returns the best peers agreeing on the finalized checkpoint, or
// ErrInsufficientSuitable if fewer than the required number are connected.
func (a *Assigner) freshPeers() ([]peer.ID, error) {
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.finalized)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Info("Unable to assign peer while suitable peers < required ")
return nil, ErrInsufficientSuitable
}
return peers, nil
}
// Assign uses the BestFinalized method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
best, err := a.freshPeers()
if err != nil {
return nil, err
}
ps := make([]peer.ID, 0, n)
for _, p := range best {
if !busy[p] {
ps = append(ps, p)
if len(ps) == n {
return ps, nil
}
}
}
return ps, nil
}
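A hypothetical consumer-side helper (not part of the new file) showing how a sync component might use Assign together with its own busy set; the name is illustrative and the package imports above are assumed:
// requestFromIdlePeers asks the Assigner for up to n peers that are not already busy
// and marks whatever it gets back as busy; the caller clears the flag when a request completes.
func requestFromIdlePeers(a *Assigner, busy map[peer.ID]bool, n int) ([]peer.ID, error) {
    pids, err := a.Assign(busy, n)
    if err != nil {
        // ErrInsufficientSuitable: not enough peers agree on the finalized checkpoint yet.
        return nil, err
    }
    for _, pid := range pids {
        busy[pid] = true
    }
    return pids, nil
}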

View File

@@ -24,6 +24,7 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/blob:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",

View File

@@ -16,15 +16,15 @@ go_library(
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//proto/eth/v2:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_r3labs_sse//:go_default_library",
"@com_github_r3labs_sse_v2//:go_default_library",
],
)
@@ -47,6 +47,6 @@ go_test(
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_r3labs_sse//:go_default_library",
"@com_github_r3labs_sse_v2//:go_default_library",
],
)

View File

@@ -8,7 +8,6 @@ import (
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"strings"
@@ -16,21 +15,11 @@ import (
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/r3labs/sse"
"github.com/r3labs/sse/v2"
)
const (
versionHeader = "Eth-Consensus-Version"
grpcVersionHeader = "Grpc-metadata-Eth-Consensus-Version"
jsonMediaType = "application/json"
octetStreamMediaType = "application/octet-stream"
)
// match a number with optional decimals
var priorityRegex = regexp.MustCompile(`q=(\d+(?:\.\d+)?)`)
type sszConfig struct {
fileName string
responseJson SszResponse
@@ -122,7 +111,7 @@ func handleGetSSZ(
req *http.Request,
config sszConfig,
) (handled bool) {
ssz, err := network.SszRequested(req)
ssz, err := http2.SszRequested(req)
if err != nil {
apimiddleware.WriteError(w, apimiddleware.InternalServerError(err), nil)
return true
@@ -215,44 +204,6 @@ func handlePostSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.E
return true
}
func sszRequested(req *http.Request) (bool, error) {
accept := req.Header.Values("Accept")
if len(accept) == 0 {
return false, nil
}
types := strings.Split(accept[0], ",")
currentType, currentPriority := "", 0.0
for _, t := range types {
values := strings.Split(t, ";")
name := values[0]
if name != api.JsonMediaType && name != api.OctetStreamMediaType {
continue
}
// no params specified
if len(values) == 1 {
priority := 1.0
if priority > currentPriority {
currentType, currentPriority = name, priority
}
continue
}
params := values[1]
match := priorityRegex.FindAllStringSubmatch(params, 1)
if len(match) != 1 {
continue
}
priority, err := strconv.ParseFloat(match[0][1], 32)
if err != nil {
return false, err
}
if priority > currentPriority {
currentType, currentPriority = name, priority
}
}
return currentType == api.OctetStreamMediaType, nil
}
func sszPosted(req *http.Request) bool {
ct, ok := req.Header["Content-Type"]
if !ok {

View File

@@ -16,7 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/r3labs/sse"
"github.com/r3labs/sse/v2"
)
type testSSZResponseJson struct {
@@ -42,95 +42,6 @@ func (t testSSZResponseJson) SSZFinalized() bool {
return t.Finalized
}
func TestSSZRequested(t *testing.T) {
t.Run("ssz_requested", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{api.OctetStreamMediaType}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})
t.Run("ssz_content_type_first", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s", api.OctetStreamMediaType, api.JsonMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})
t.Run("ssz_content_type_preferred_1", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.9,%s", api.JsonMediaType, api.OctetStreamMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})
t.Run("ssz_content_type_preferred_2", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s;q=0.95,%s;q=0.9", api.OctetStreamMediaType, api.JsonMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, true, result)
})
t.Run("other_content_type_preferred", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9", api.JsonMediaType, api.OctetStreamMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
t.Run("other_params", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{fmt.Sprintf("%s,%s;q=0.9,otherparam=xyz", api.JsonMediaType, api.OctetStreamMediaType)}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
t.Run("no_header", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
t.Run("empty_header", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
t.Run("empty_header_value", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{""}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
t.Run("other_content_type", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{"application/other"}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
t.Run("garbage", func(t *testing.T) {
request := httptest.NewRequest("GET", "http://foo.example", nil)
request.Header["Accept"] = []string{"This is Sparta!!!"}
result, err := sszRequested(request)
require.NoError(t, err)
assert.Equal(t, false, result)
})
}
func TestPrepareSSZRequestForProxying(t *testing.T) {
middleware := &apimiddleware.ApiProxyMiddleware{
GatewayAddress: "http://apimiddleware.example",

View File

@@ -40,74 +40,6 @@ func wrapBLSChangesArray(
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Validator/prepareBeaconProposer expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct.
func wrapFeeRecipientsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*FeeRecipientsRequestJSON); !ok {
return true, nil
}
recipients := make([]*FeeRecipientJson, 0)
if err := json.NewDecoder(req.Body).Decode(&recipients); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &FeeRecipientsRequestJSON{Recipients: recipients}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct.
func wrapSignedValidatorRegistrationsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SignedValidatorRegistrationsRequestJson); !ok {
return true, nil
}
registrations := make([]*SignedValidatorRegistrationJson, 0)
if err := json.NewDecoder(req.Body).Decode(&registrations); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SignedValidatorRegistrationsRequestJson{Registrations: registrations}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Beacon/submitPoolAttestations expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapAttestationsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitAttestationRequestJson); ok {
atts := make([]*AttestationJson, 0)
if err := json.NewDecoder(req.Body).Decode(&atts); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitAttestationRequestJson{Data: atts}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// Some endpoints e.g. https://ethereum.github.io/beacon-apis/#/Validator/getAttesterDuties expect posting a top-level array of validator indices.
// We make it more proto-friendly by wrapping it in a struct with an 'Index' field.
func wrapValidatorIndicesArray(
@@ -130,116 +62,6 @@ func wrapValidatorIndicesArray(
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Validator/publishAggregateAndProofs expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSignedAggregateAndProofArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitAggregateAndProofsRequestJson); ok {
data := make([]*SignedAggregateAttestationAndProofJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitAggregateAndProofsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Validator/prepareBeaconCommitteeSubnet expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapBeaconCommitteeSubscriptionsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitBeaconCommitteeSubscriptionsRequestJson); ok {
data := make([]*BeaconCommitteeSubscribeJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitBeaconCommitteeSubscriptionsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/prepareSyncCommitteeSubnets expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSubscriptionsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitSyncCommitteeSubscriptionRequestJson); ok {
data := make([]*SyncCommitteeSubscriptionJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitSyncCommitteeSubscriptionRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolSyncCommitteeSignatures expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSignaturesArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitSyncCommitteeSignaturesRequestJson); ok {
data := make([]*SyncCommitteeMessageJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitSyncCommitteeSignaturesRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/publishContributionAndProofs expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSignedContributionAndProofsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitContributionAndProofsRequestJson); ok {
data := make([]*SignedContributionAndProofJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitContributionAndProofsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
type phase0PublishBlockRequestJson struct {
Phase0Block *SignedBeaconBlockJson `json:"phase0_block"`
}

View File

@@ -19,46 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func TestWrapAttestationArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAttestationRequestJson{},
}
unwrappedAtts := []*AttestationJson{{AggregationBits: "1010"}}
unwrappedAttsJson, err := json.Marshal(unwrappedAtts)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedAttsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapAttestationsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedAtts := &SubmitAttestationRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedAtts))
require.Equal(t, 1, len(wrappedAtts.Data), "wrong number of wrapped items")
assert.Equal(t, "1010", wrappedAtts.Data[0].AggregationBits)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAttestationRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapAttestationsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapValidatorIndicesArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
@@ -140,268 +100,6 @@ func TestWrapBLSChangesArray(t *testing.T) {
})
}
func TestWrapSignedAggregateAndProofArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAggregateAndProofsRequestJson{},
}
unwrappedAggs := []*SignedAggregateAttestationAndProofJson{{Signature: "sig"}}
unwrappedAggsJson, err := json.Marshal(unwrappedAggs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedAggsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedAggregateAndProofArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedAggs := &SubmitAggregateAndProofsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedAggs))
require.Equal(t, 1, len(wrappedAggs.Data), "wrong number of wrapped items")
assert.Equal(t, "sig", wrappedAggs.Data[0].Signature)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAggregateAndProofsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedAggregateAndProofArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapBeaconCommitteeSubscriptionsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
}
unwrappedSubs := []*BeaconCommitteeSubscribeJson{{
ValidatorIndex: "1",
CommitteeIndex: "1",
CommitteesAtSlot: "1",
Slot: "1",
IsAggregator: true,
}}
unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedSubsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedSubs := &SubmitBeaconCommitteeSubscriptionsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
require.Equal(t, 1, len(wrappedSubs.Data), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
assert.Equal(t, "1", wrappedSubs.Data[0].CommitteeIndex)
assert.Equal(t, "1", wrappedSubs.Data[0].CommitteesAtSlot)
assert.Equal(t, "1", wrappedSubs.Data[0].Slot)
assert.Equal(t, true, wrappedSubs.Data[0].IsAggregator)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapSyncCommitteeSubscriptionsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
}
unwrappedSubs := []*SyncCommitteeSubscriptionJson{
{
ValidatorIndex: "1",
SyncCommitteeIndices: []string{"1", "2"},
UntilEpoch: "1",
},
{
ValidatorIndex: "2",
SyncCommitteeIndices: []string{"3", "4"},
UntilEpoch: "2",
},
}
unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedSubsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedSubs := &SubmitSyncCommitteeSubscriptionRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
require.Equal(t, 2, len(wrappedSubs.Data), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
require.Equal(t, 2, len(wrappedSubs.Data[0].SyncCommitteeIndices), "wrong number of committee indices")
assert.Equal(t, "1", wrappedSubs.Data[0].SyncCommitteeIndices[0])
assert.Equal(t, "2", wrappedSubs.Data[0].SyncCommitteeIndices[1])
assert.Equal(t, "1", wrappedSubs.Data[0].UntilEpoch)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapSyncCommitteeSignaturesArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSignaturesRequestJson{},
}
unwrappedSigs := []*SyncCommitteeMessageJson{{
Slot: "1",
BeaconBlockRoot: "root",
ValidatorIndex: "1",
Signature: "sig",
}}
unwrappedSigsJson, err := json.Marshal(unwrappedSigs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedSigsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSignaturesArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedSigs := &SubmitSyncCommitteeSignaturesRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSigs))
require.Equal(t, 1, len(wrappedSigs.Data), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedSigs.Data[0].Slot)
assert.Equal(t, "root", wrappedSigs.Data[0].BeaconBlockRoot)
assert.Equal(t, "1", wrappedSigs.Data[0].ValidatorIndex)
assert.Equal(t, "sig", wrappedSigs.Data[0].Signature)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSignaturesRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSignaturesArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapSignedContributionAndProofsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitContributionAndProofsRequestJson{},
}
unwrapped := []*SignedContributionAndProofJson{
{
Message: &ContributionAndProofJson{
AggregatorIndex: "1",
Contribution: &SyncCommitteeContributionJson{
Slot: "1",
BeaconBlockRoot: "root",
SubcommitteeIndex: "1",
AggregationBits: "bits",
Signature: "sig",
},
SelectionProof: "proof",
},
Signature: "sig",
},
{
Message: &ContributionAndProofJson{},
Signature: "sig",
},
}
unwrappedJson, err := json.Marshal(unwrapped)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedContributionAndProofsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrapped := &SubmitContributionAndProofsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrapped))
require.Equal(t, 2, len(wrapped.Data), "wrong number of wrapped items")
assert.Equal(t, "sig", wrapped.Data[0].Signature)
require.NotNil(t, wrapped.Data[0].Message)
msg := wrapped.Data[0].Message
assert.Equal(t, "1", msg.AggregatorIndex)
assert.Equal(t, "proof", msg.SelectionProof)
require.NotNil(t, msg.Contribution)
assert.Equal(t, "1", msg.Contribution.Slot)
assert.Equal(t, "root", msg.Contribution.BeaconBlockRoot)
assert.Equal(t, "1", msg.Contribution.SubcommitteeIndex)
assert.Equal(t, "bits", msg.Contribution.AggregationBits)
assert.Equal(t, "sig", msg.Contribution.Signature)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitContributionAndProofsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedContributionAndProofsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
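The wrap hooks exercised by the tests above all follow the same pattern: decode the unwrapped JSON array from the request body, wrap it in the endpoint container's data field, and put the wrapped payload back as the body so default processing can continue. Below is a minimal, self-contained sketch of that pattern in plain net/http terms; the type and function names (wrapArrayBody, wrappedRequest) are illustrative stand-ins, not the middleware's actual internals.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

type syncCommitteeMessage struct {
	Slot            string `json:"slot"`
	BeaconBlockRoot string `json:"beacon_block_root"`
	ValidatorIndex  string `json:"validator_index"`
	Signature       string `json:"signature"`
}

type wrappedRequest struct {
	Data []*syncCommitteeMessage `json:"data"`
}

// wrapArrayBody decodes the unwrapped array and replaces the request body
// with the {"data": [...]} form that a wrapped request container expects.
func wrapArrayBody(req *http.Request) error {
	var msgs []*syncCommitteeMessage
	if err := json.NewDecoder(req.Body).Decode(&msgs); err != nil {
		return fmt.Errorf("could not decode body: %w", err)
	}
	wrapped, err := json.Marshal(&wrappedRequest{Data: msgs})
	if err != nil {
		return fmt.Errorf("could not marshal wrapped body: %w", err)
	}
	req.Body = io.NopCloser(bytes.NewReader(wrapped))
	return nil
}

func main() {
	body := bytes.NewBufferString(`[{"slot":"1","beacon_block_root":"root","validator_index":"1","signature":"sig"}]`)
	req := httptest.NewRequest("POST", "http://foo.example", body)
	if err := wrapArrayBody(req); err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(req.Body)
	fmt.Println(string(out)) // {"data":[{"slot":"1",...}]}
}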
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()

View File

@@ -32,15 +32,11 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/beacon/blinded_blocks",
"/eth/v1/beacon/blocks/{block_id}",
"/eth/v2/beacon/blocks/{block_id}",
"/eth/v1/beacon/blocks/{block_id}/root",
"/eth/v1/beacon/blocks/{block_id}/attestations",
"/eth/v1/beacon/blinded_blocks/{block_id}",
"/eth/v1/beacon/pool/attestations",
"/eth/v1/beacon/pool/attester_slashings",
"/eth/v1/beacon/pool/proposer_slashings",
"/eth/v1/beacon/pool/voluntary_exits",
"/eth/v1/beacon/pool/bls_to_execution_changes",
"/eth/v1/beacon/pool/sync_committees",
"/eth/v1/beacon/pool/bls_to_execution_changes",
"/eth/v1/beacon/weak_subjectivity",
"/eth/v1/node/identity",
@@ -48,7 +44,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/node/peers/{peer_id}",
"/eth/v1/node/peer_count",
"/eth/v1/node/version",
"/eth/v1/node/syncing",
"/eth/v1/node/health",
"/eth/v1/debug/beacon/states/{state_id}",
"/eth/v2/debug/beacon/states/{state_id}",
@@ -65,15 +60,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/validator/blocks/{slot}",
"/eth/v2/validator/blocks/{slot}",
"/eth/v1/validator/blinded_blocks/{slot}",
"/eth/v1/validator/attestation_data",
"/eth/v1/validator/aggregate_attestation",
"/eth/v1/validator/beacon_committee_subscriptions",
"/eth/v1/validator/sync_committee_subscriptions",
"/eth/v1/validator/aggregate_and_proofs",
"/eth/v1/validator/sync_committee_contribution",
"/eth/v1/validator/contribution_and_proofs",
"/eth/v1/validator/prepare_beacon_proposer",
"/eth/v1/validator/register_validator",
"/eth/v1/validator/liveness/{epoch}",
}
}
@@ -136,8 +122,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZV2}
case "/eth/v1/beacon/blocks/{block_id}/root":
endpoint.GetResponse = &BlockRootResponseJson{}
case "/eth/v1/beacon/blocks/{block_id}/attestations":
endpoint.GetResponse = &BlockAttestationsResponseJson{}
case "/eth/v1/beacon/blinded_blocks/{block_id}":
@@ -146,23 +130,12 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBlindedBeaconBlockSSZ}
case "/eth/v1/beacon/pool/attestations":
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
endpoint.GetResponse = &AttestationsPoolResponseJson{}
endpoint.PostRequest = &SubmitAttestationRequestJson{}
endpoint.Err = &IndexedVerificationFailureErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapAttestationsArray,
}
case "/eth/v1/beacon/pool/attester_slashings":
endpoint.PostRequest = &AttesterSlashingJson{}
endpoint.GetResponse = &AttesterSlashingsPoolResponseJson{}
case "/eth/v1/beacon/pool/proposer_slashings":
endpoint.PostRequest = &ProposerSlashingJson{}
endpoint.GetResponse = &ProposerSlashingsPoolResponseJson{}
case "/eth/v1/beacon/pool/voluntary_exits":
endpoint.PostRequest = &SignedVoluntaryExitJson{}
endpoint.GetResponse = &VoluntaryExitsPoolResponseJson{}
case "/eth/v1/beacon/pool/bls_to_execution_changes":
endpoint.PostRequest = &SubmitBLSToExecutionChangesRequest{}
endpoint.GetResponse = &BLSToExecutionChangesPoolResponseJson{}
@@ -170,12 +143,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapBLSChangesArray,
}
case "/eth/v1/beacon/pool/sync_committees":
endpoint.PostRequest = &SubmitSyncCommitteeSignaturesRequestJson{}
endpoint.Err = &IndexedVerificationFailureErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSyncCommitteeSignaturesArray,
}
case "/eth/v1/beacon/weak_subjectivity":
endpoint.GetResponse = &WeakSubjectivityResponse{}
case "/eth/v1/node/identity":
@@ -190,8 +157,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.GetResponse = &PeerCountResponseJson{}
case "/eth/v1/node/version":
endpoint.GetResponse = &VersionResponseJson{}
case "/eth/v1/node/syncing":
endpoint.GetResponse = &SyncingResponseJson{}
case "/eth/v1/node/health":
// Use default endpoint
case "/eth/v1/debug/beacon/states/{state_id}":
@@ -260,47 +225,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
case "/eth/v1/validator/attestation_data":
endpoint.GetResponse = &ProduceAttestationDataResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
case "/eth/v1/validator/aggregate_attestation":
endpoint.GetResponse = &AggregateAttestationResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "attestation_data_root", Hex: true}, {Name: "slot"}}
case "/eth/v1/validator/beacon_committee_subscriptions":
endpoint.PostRequest = &SubmitBeaconCommitteeSubscriptionsRequestJson{}
endpoint.Err = &NodeSyncDetailsErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapBeaconCommitteeSubscriptionsArray,
}
case "/eth/v1/validator/sync_committee_subscriptions":
endpoint.PostRequest = &SubmitSyncCommitteeSubscriptionRequestJson{}
endpoint.Err = &NodeSyncDetailsErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSyncCommitteeSubscriptionsArray,
}
case "/eth/v1/validator/aggregate_and_proofs":
endpoint.PostRequest = &SubmitAggregateAndProofsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedAggregateAndProofArray,
}
case "/eth/v1/validator/sync_committee_contribution":
endpoint.GetResponse = &ProduceSyncCommitteeContributionResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "subcommittee_index"}, {Name: "beacon_block_root", Hex: true}}
case "/eth/v1/validator/contribution_and_proofs":
endpoint.PostRequest = &SubmitContributionAndProofsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedContributionAndProofsArray,
}
case "/eth/v1/validator/prepare_beacon_proposer":
endpoint.PostRequest = &FeeRecipientsRequestJSON{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapFeeRecipientsArray,
}
case "/eth/v1/validator/register_validator":
endpoint.PostRequest = &SignedValidatorRegistrationsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedValidatorRegistrationsArray,
}
case "/eth/v1/validator/liveness/{epoch}":
endpoint.PostRequest = &ValidatorIndicesJson{}
endpoint.PostResponse = &LivenessResponseJson{}
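Every case deleted above removes a path from the middleware factory; judging by the BUILD changes further down (handlers_pool.go plus the gorilla/mux dependency), these paths are presumably served by native HTTP handlers instead. The following is only a sketch of what such a registration could look like, with a made-up handler (submitSyncCommittees) and no real validation, broadcasting, or pool logic.
package main

import (
	"encoding/json"
	"net/http"

	"github.com/gorilla/mux"
)

type syncCommitteeMessageJSON struct {
	Slot            string `json:"slot"`
	BeaconBlockRoot string `json:"beacon_block_root"`
	ValidatorIndex  string `json:"validator_index"`
	Signature       string `json:"signature"`
}

// submitSyncCommittees is a hypothetical native handler for a path that
// used to be routed through the API middleware.
func submitSyncCommittees(w http.ResponseWriter, r *http.Request) {
	var msgs []*syncCommitteeMessageJSON
	if err := json.NewDecoder(r.Body).Decode(&msgs); err != nil {
		http.Error(w, "could not decode request body", http.StatusBadRequest)
		return
	}
	// Validation, broadcasting, and saving to the pool would happen here.
	w.WriteHeader(http.StatusOK)
}

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/eth/v1/beacon/pool/sync_committees", submitSyncCommittees).Methods(http.MethodPost)
	// Error handling elided for brevity in this sketch.
	_ = http.ListenAndServe(":8080", r)
}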

View File

@@ -4,7 +4,7 @@ import (
"strings"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
)
@@ -31,10 +31,6 @@ type WeakSubjectivityResponse struct {
} `json:"data"`
}
type FeeRecipientsRequestJSON struct {
Recipients []*FeeRecipientJson `json:"recipients"`
}
type StateRootResponseJson struct {
Data *StateRootResponse_StateRootJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
@@ -143,14 +139,6 @@ type BlockAttestationsResponseJson struct {
Finalized bool `json:"finalized"`
}
type AttestationsPoolResponseJson struct {
Data []*AttestationJson `json:"data"`
}
type SubmitAttestationRequestJson struct {
Data []*AttestationJson `json:"data"`
}
type AttesterSlashingsPoolResponseJson struct {
Data []*AttesterSlashingJson `json:"data"`
}
@@ -159,14 +147,6 @@ type ProposerSlashingsPoolResponseJson struct {
Data []*ProposerSlashingJson `json:"data"`
}
type VoluntaryExitsPoolResponseJson struct {
Data []*SignedVoluntaryExitJson `json:"data"`
}
type SubmitSyncCommitteeSignaturesRequestJson struct {
Data []*SyncCommitteeMessageJson `json:"data"`
}
type BLSToExecutionChangesPoolResponseJson struct {
Data []*SignedBLSToExecutionChangeJson `json:"data"`
}
@@ -199,7 +179,7 @@ type VersionResponseJson struct {
}
type SyncingResponseJson struct {
Data *helpers.SyncDetailsJson `json:"data"`
Data *shared.SyncDetails `json:"data"`
}
type BeaconStateResponseJson struct {
@@ -268,18 +248,10 @@ type ProduceBlindedBlockResponseJson struct {
Data *BlindedBeaconBlockContainerJson `json:"data"`
}
type ProduceAttestationDataResponseJson struct {
Data *AttestationDataJson `json:"data"`
}
type AggregateAttestationResponseJson struct {
Data *AttestationJson `json:"data"`
}
type SubmitBeaconCommitteeSubscriptionsRequestJson struct {
Data []*BeaconCommitteeSubscribeJson `json:"data"`
}
type BeaconCommitteeSubscribeJson struct {
ValidatorIndex string `json:"validator_index"`
CommitteeIndex string `json:"committee_index"`
@@ -288,28 +260,10 @@ type BeaconCommitteeSubscribeJson struct {
IsAggregator bool `json:"is_aggregator"`
}
type SubmitSyncCommitteeSubscriptionRequestJson struct {
Data []*SyncCommitteeSubscriptionJson `json:"data"`
}
type SyncCommitteeSubscriptionJson struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
UntilEpoch string `json:"until_epoch"`
}
type SubmitAggregateAndProofsRequestJson struct {
Data []*SignedAggregateAttestationAndProofJson `json:"data"`
}
type ProduceSyncCommitteeContributionResponseJson struct {
Data *SyncCommitteeContributionJson `json:"data"`
}
type SubmitContributionAndProofsRequestJson struct {
Data []*SignedContributionAndProofJson `json:"data"`
}
type ForkChoiceNodeResponseJson struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root" hex:"true"`
@@ -719,8 +673,8 @@ type ExecutionPayloadDenebJson struct {
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
DataGasUsed string `json:"data_gas_used"` // new in deneb
ExcessDataGas string `json:"excess_data_gas"` // new in deneb
BlobGasUsed string `json:"blob_gas_used"` // new in deneb
ExcessBlobGas string `json:"excess_blob_gas"` // new in deneb
BlockHash string `json:"block_hash" hex:"true"`
Transactions []string `json:"transactions" hex:"true"`
Withdrawals []*WithdrawalJson `json:"withdrawals"`
@@ -774,8 +728,8 @@ type ExecutionPayloadHeaderDenebJson struct {
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
DataGasUsed string `json:"data_gas_used"` // new in deneb
ExcessDataGas string `json:"excess_data_gas"` // new in deneb
BlobGasUsed string `json:"blob_gas_used"` // new in deneb
ExcessBlobGas string `json:"excess_blob_gas"` // new in deneb
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
@@ -832,11 +786,6 @@ type IndexedAttestationJson struct {
Signature string `json:"signature" hex:"true"`
}
type FeeRecipientJson struct {
ValidatorIndex string `json:"validator_index"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
}
type AttestationJson struct {
AggregationBits string `json:"aggregation_bits" hex:"true"`
Data *AttestationDataJson `json:"data"`
@@ -888,13 +837,6 @@ type VoluntaryExitJson struct {
ValidatorIndex string `json:"validator_index"`
}
type SyncCommitteeMessageJson struct {
Slot string `json:"slot"`
BeaconBlockRoot string `json:"beacon_block_root" hex:"true"`
ValidatorIndex string `json:"validator_index"`
Signature string `json:"signature" hex:"true"`
}
type IdentityJson struct {
PeerId string `json:"peer_id"`
Enr string `json:"enr"`
@@ -1196,22 +1138,6 @@ type SyncCommitteeContributionJson struct {
Signature string `json:"signature" hex:"true"`
}
type ValidatorRegistrationJson struct {
FeeRecipient string `json:"fee_recipient" hex:"true"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey string `json:"pubkey" hex:"true"`
}
type SignedValidatorRegistrationJson struct {
Message *ValidatorRegistrationJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type SignedValidatorRegistrationsRequestJson struct {
Registrations []*SignedValidatorRegistrationJson `json:"registrations"`
}
type ForkChoiceNodeJson struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root" hex:"true"`
@@ -1411,7 +1337,7 @@ type SingleIndexedVerificationFailureJson struct {
type NodeSyncDetailsErrorJson struct {
apimiddleware.DefaultErrorJson
SyncDetails helpers.SyncDetailsJson `json:"sync_details"`
SyncDetails shared.SyncDetails `json:"sync_details"`
}
type EventErrorJson struct {

View File

@@ -4,22 +4,40 @@ go_library(
name = "go_default_library",
srcs = [
"errors.go",
"log.go",
"service.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -0,0 +1,5 @@
package core
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "rpc/core")

View File

@@ -0,0 +1,23 @@
package core
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)
type Service struct {
HeadFetcher blockchain.HeadFetcher
GenesisTimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
Broadcaster p2p.Broadcaster
SyncCommitteePool synccommittee.Pool
OperationNotifier opfeed.Notifier
AttestationCache *cache.AttestationCache
StateGen stategen.StateManager
P2P p2p.Broadcaster
}
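The struct above bundles the chain, sync, p2p, caching, and state-generation dependencies that the core methods in validator.go below rely on. The fragment that follows is an illustrative wiring sketch only, not code from this diff: newCoreService and its parameter names are assumptions, and the point is simply that one chain service value can satisfy both the head and genesis-time fetcher interfaces.
package example

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)

// newCoreService is an illustrative constructor (not part of the diff). A
// single chain service value is passed for both fetcher fields.
func newCoreService(
	chain interface {
		blockchain.HeadFetcher
		blockchain.TimeFetcher
	},
	syncChecker sync.Checker,
	broadcaster p2p.Broadcaster,
	pool synccommittee.Pool,
	notifier opfeed.Notifier,
	attCache *cache.AttestationCache,
	sg stategen.StateManager,
) *core.Service {
	return &core.Service{
		HeadFetcher:        chain,
		GenesisTimeFetcher: chain,
		SyncChecker:        syncChecker,
		Broadcaster:        broadcaster,
		SyncCommitteePool:  pool,
		OperationNotifier:  notifier,
		AttestationCache:   attCache,
		StateGen:           sg,
		P2P:                broadcaster,
	}
}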

View File

@@ -1,34 +1,71 @@
package core
import (
"bytes"
"context"
"fmt"
"sort"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
func ComputeValidatorPerformance(
// AggregateBroadcastFailedError represents an error scenario where
// broadcasting an aggregate selection proof failed.
type AggregateBroadcastFailedError struct {
err error
}
// NewAggregateBroadcastFailedError creates a new error instance.
func NewAggregateBroadcastFailedError(err error) AggregateBroadcastFailedError {
return AggregateBroadcastFailedError{
err: err,
}
}
// Error returns the underlying error message.
func (e *AggregateBroadcastFailedError) Error() string {
return fmt.Sprintf("could not broadcast signed aggregated attestation: %s", e.err.Error())
}
// ComputeValidatorPerformance reports the validator's latest balance along with other important metrics on
// rewards and penalties throughout its lifecycle in the beacon chain.
func (s *Service) ComputeValidatorPerformance(
ctx context.Context,
req *ethpb.ValidatorPerformanceRequest,
headFetcher blockchain.HeadFetcher,
currSlot primitives.Slot,
) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
headState, err := headFetcher.HeadState(ctx)
if s.SyncChecker.Syncing() {
return nil, &RpcError{Reason: Unavailable, Err: errors.New("Syncing to latest head, not ready to respond")}
}
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head state"), Reason: Internal}
}
currSlot := s.GenesisTimeFetcher.CurrentSlot()
if currSlot > headState.Slot() {
headRoot, err := headFetcher.HeadRoot(ctx)
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head root"), Reason: Internal}
}
@@ -166,3 +203,283 @@ func ComputeValidatorPerformance(
InactivityScores: inactivityScores, // Only populated in Altair
}, nil
}
// SubmitSignedContributionAndProof is called by a sync committee aggregator
// to submit a signed contribution and proof object.
func (s *Service) SubmitSignedContributionAndProof(
ctx context.Context,
req *ethpb.SignedContributionAndProof,
) *RpcError {
errs, ctx := errgroup.WithContext(ctx)
// Broadcast and save the contribution to the pool in parallel, so that a failure in one does not affect the other.
errs.Go(func() error {
return s.Broadcaster.Broadcast(ctx, req)
})
if err := s.SyncCommitteePool.SaveSyncCommitteeContribution(req.Message.Contribution); err != nil {
return &RpcError{Err: err, Reason: Internal}
}
// Wait for p2p broadcast to complete and return the first error (if any)
err := errs.Wait()
if err != nil {
return &RpcError{Err: err, Reason: Internal}
}
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.SyncCommitteeContributionReceived,
Data: &opfeed.SyncCommitteeContributionReceivedData{
Contribution: req,
},
})
return nil
}
// SubmitSignedAggregateSelectionProof verifies the given aggregate and proof and publishes it on the appropriate gossipsub topic.
func (s *Service) SubmitSignedAggregateSelectionProof(
ctx context.Context,
req *ethpb.SignedAggregateSubmitRequest,
) *RpcError {
if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil ||
req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil {
return &RpcError{Err: errors.New("signed aggregate request can't be nil"), Reason: BadRequest}
}
emptySig := make([]byte, fieldparams.BLSSignatureLength)
if bytes.Equal(req.SignedAggregateAndProof.Signature, emptySig) ||
bytes.Equal(req.SignedAggregateAndProof.Message.SelectionProof, emptySig) {
return &RpcError{Err: errors.New("signed signatures can't be zero hashes"), Reason: BadRequest}
}
// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
s.GenesisTimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return &RpcError{Err: errors.New("attestation slot is no longer valid from current time"), Reason: BadRequest}
}
if err := s.Broadcaster.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
return &RpcError{Err: &AggregateBroadcastFailedError{err: err}, Reason: Internal}
}
log.WithFields(logrus.Fields{
"slot": req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
"committeeIndex": req.SignedAggregateAndProof.Message.Aggregate.Data.CommitteeIndex,
"validatorIndex": req.SignedAggregateAndProof.Message.AggregatorIndex,
"aggregatedCount": req.SignedAggregateAndProof.Message.Aggregate.AggregationBits.Count(),
}).Debug("Broadcasting aggregated attestation and proof")
return nil
}
// AggregatedSigAndAggregationBits returns the aggregated signature and aggregation bits
// associated with a particular set of sync committee messages.
func (s *Service) AggregatedSigAndAggregationBits(
ctx context.Context,
req *ethpb.AggregatedSigAndAggregationBitsRequest) ([]byte, []byte, error) {
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
sigs := make([][]byte, 0, subCommitteeSize)
bits := ethpb.NewSyncCommitteeAggregationBits()
for _, msg := range req.Msgs {
if bytes.Equal(req.BlockRoot, msg.BlockRoot) {
headSyncCommitteeIndices, err := s.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, req.Slot)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get sync subcommittee index")
}
for _, index := range headSyncCommitteeIndices {
i := uint64(index)
subnetIndex := i / subCommitteeSize
indexMod := i % subCommitteeSize
if subnetIndex == req.SubnetId && !bits.BitAt(indexMod) {
bits.SetBitAt(indexMod, true)
sigs = append(sigs, msg.Signature)
}
}
}
}
aggregatedSig := make([]byte, 96)
aggregatedSig[0] = 0xC0
if len(sigs) != 0 {
uncompressedSigs, err := bls.MultipleSignaturesFromBytes(sigs)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not decompress signatures")
}
aggregatedSig = bls.AggregateSignatures(uncompressedSigs).Marshal()
}
return aggregatedSig, bits, nil
}
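A worked example of the indexing above, assuming mainnet parameters: SyncCommitteeSize = 512 and SyncCommitteeSubnetCount = 4 give a subCommitteeSize of 128, so a head sync committee index of 300 lands in subnetIndex 300/128 = 2 at bit position 300%128 = 44; a signature is collected only when that subnet matches req.SubnetId and the bit is not already set. When no messages match, the function returns the 96-byte compressed BLS point-at-infinity signature (leading byte 0xC0, all other bytes zero).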
// AssignValidatorToSubnet checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
func AssignValidatorToSubnet(pubkey []byte, status validator.ValidatorStatus) {
if status != validator.Active {
return
}
assignValidatorToSubnet(pubkey)
}
// AssignValidatorToSubnetProto checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
//
// It has a Proto suffix because the status is a protobuf type.
func AssignValidatorToSubnetProto(pubkey []byte, status ethpb.ValidatorStatus) {
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
return
}
assignValidatorToSubnet(pubkey)
}
func assignValidatorToSubnet(pubkey []byte) {
_, ok, expTime := cache.SubnetIDs.GetPersistentSubnets(pubkey)
if ok && expTime.After(prysmTime.Now()) {
return
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
var assignedIdxs []uint64
randGen := rand.NewGenerator()
for i := uint64(0); i < params.BeaconConfig().RandomSubnetsPerValidator; i++ {
assignedIdx := randGen.Intn(int(params.BeaconNetworkConfig().AttestationSubnetCount))
assignedIdxs = append(assignedIdxs, uint64(assignedIdx))
}
assignedDuration := uint64(randGen.Intn(int(params.BeaconConfig().EpochsPerRandomSubnetSubscription)))
assignedDuration += params.BeaconConfig().EpochsPerRandomSubnetSubscription
totalDuration := epochDuration * time.Duration(assignedDuration)
cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
}
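Worked example, assuming mainnet config values (SlotsPerEpoch = 32, SecondsPerSlot = 12, RandomSubnetsPerValidator = 1, AttestationSubnetCount = 64, EpochsPerRandomSubnetSubscription = 256): the validator is assigned one random attestation subnet out of 64, epochDuration evaluates to 384, assignedDuration is drawn from [256, 511], and the resulting persistent subscription lasts 384 * assignedDuration seconds, i.e. between roughly 27 and 55 hours.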
// GetAttestationData requests that the beacon node produce attestation data for
// the requested committee index and slot, based on the node's current head.
func (s *Service) GetAttestationData(
ctx context.Context, req *ethpb.AttestationDataRequest,
) (*ethpb.AttestationData, *RpcError) {
if err := helpers.ValidateAttestationTime(
req.Slot,
s.GenesisTimeFetcher.GenesisTime(),
params.BeaconNetworkConfig().MaximumGossipClockDisparity,
); err != nil {
return nil, &RpcError{Reason: BadRequest, Err: errors.Errorf("invalid request: %v", err)}
}
res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res != nil {
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
if err := s.AttestationCache.MarkInProgress(req); err != nil {
if errors.Is(err, cache.ErrAlreadyInProgress) {
res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res == nil {
return nil, &RpcError{Reason: Internal, Err: errors.New("a request was in progress and resolved to nil")}
}
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not mark attestation as in-progress: %v", err)}
}
defer func() {
if err := s.AttestationCache.MarkNotInProgress(req); err != nil {
log.WithError(err).Error("could not mark attestation as not-in-progress")
}
}()
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head state: %v", err)}
}
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head root: %v", err)}
}
// Handle the case where we receive an attestation request after a newer state/block has been processed.
if headState.Slot() > req.Slot {
headRoot, err = helpers.BlockRootAtSlot(headState, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head root: %v", err)}
}
headState, err = s.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head state: %v", err)}
}
}
if headState == nil || headState.IsNil() {
return nil, &RpcError{Reason: Internal, Err: errors.New("could not lookup parent state from head")}
}
if coreTime.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) {
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not process slots up to %d: %v", req.Slot, err)}
}
}
targetEpoch := coreTime.CurrentEpoch(headState)
epochStartSlot, err := slots.EpochStart(targetEpoch)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not calculate epoch start: %v", err)}
}
var targetRoot []byte
if epochStartSlot == headState.Slot() {
targetRoot = headRoot
} else {
targetRoot, err = helpers.BlockRootAtSlot(headState, epochStartSlot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get target block for slot %d: %v", epochStartSlot, err)}
}
if bytesutil.ToBytes32(targetRoot) == params.BeaconConfig().ZeroHash {
targetRoot = headRoot
}
}
res = &ethpb.AttestationData{
Slot: req.Slot,
CommitteeIndex: req.CommitteeIndex,
BeaconBlockRoot: headRoot,
Source: headState.CurrentJustifiedCheckpoint(),
Target: &ethpb.Checkpoint{
Epoch: targetEpoch,
Root: targetRoot,
},
}
if err := s.AttestationCache.Put(ctx, req, res); err != nil {
log.WithError(err).Error("could not store attestation data in cache")
}
return res, nil
}
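Worked example of the target selection above, assuming mainnet SlotsPerEpoch = 32: for a request at slot 70 the head state sits in epoch 2, so epochStartSlot is 64 and the target root is the root of the block at slot 64; if that lookup yields the zero hash, the head root is used as the target instead. The source checkpoint is always the head state's current justified checkpoint.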
// SubmitSyncMessage submits the sync committee message to the network.
// It also saves the sync committee message into the pending pool for block inclusion.
func (s *Service) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) error {
errs, ctx := errgroup.WithContext(ctx)
headSyncCommitteeIndices, err := s.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, msg.Slot)
if err != nil {
return err
}
// Broadcast and save the message to the pool in parallel, so that a failure in one does not affect the other.
// The message is broadcast to every subnet that the validator's sync committee indices map to.
for _, index := range headSyncCommitteeIndices {
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
subnet := uint64(index) / subCommitteeSize
errs.Go(func() error {
return s.P2P.BroadcastSyncCommitteeMessage(ctx, subnet, msg)
})
}
if err := s.SyncCommitteePool.SaveSyncCommitteeMessage(msg); err != nil {
return err
}
// Wait for p2p broadcast to complete and return the first error (if any)
return errs.Wait()
}
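The methods above report failures as *RpcError values rather than gRPC status errors, leaving the transport layer to translate the Reason. The sketch below is a hypothetical wrapper, not code from this diff: attestationData and its HTTP-status mapping are assumptions, using only the BadRequest, Unavailable, and Internal reasons that appear in this file.
package example

import (
	"context"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// attestationData is a hypothetical wrapper showing how a transport layer can
// call the shared service and turn its *core.RpcError into an HTTP status.
func attestationData(ctx context.Context, s *core.Service, req *ethpb.AttestationDataRequest) (*ethpb.AttestationData, int, error) {
	data, rpcErr := s.GetAttestationData(ctx, req)
	if rpcErr == nil {
		return data, http.StatusOK, nil
	}
	// Map the reasons used in validator.go to HTTP status codes.
	status := http.StatusInternalServerError
	switch rpcErr.Reason {
	case core.BadRequest:
		status = http.StatusBadRequest
	case core.Unavailable:
		status = http.StatusServiceUnavailable
	}
	return nil, status, rpcErr.Err
}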

View File

@@ -7,6 +7,7 @@ go_library(
"blocks.go",
"config.go",
"handlers.go",
"handlers_pool.go",
"log.go",
"pool.go",
"server.go",
@@ -36,7 +37,9 @@ go_library(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -44,17 +47,18 @@ go_library(
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//network/http:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
@@ -63,9 +67,9 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_go_playground_validator_v10//:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//:go_default_library",
@@ -83,6 +87,7 @@ go_test(
"blinded_blocks_test.go",
"blocks_test.go",
"config_test.go",
"handlers_pool_test.go",
"handlers_test.go",
"init_test.go",
"pool_test.go",
@@ -94,13 +99,13 @@ go_test(
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/blstoexec/mock:go_default_library",
@@ -108,9 +113,12 @@ go_test(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits/mock:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -120,12 +128,14 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -137,11 +147,13 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",

View File

@@ -489,7 +489,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "deneb")
md.Set(api.VersionHeader, "deneb")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -509,7 +509,7 @@ func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "deneb")
md.Set(api.VersionHeader, "deneb")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)

View File

@@ -3,7 +3,6 @@ package beacon
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/golang/protobuf/ptypes/empty"
@@ -18,7 +17,6 @@ import (
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/network/forks"
@@ -582,93 +580,6 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
// GetBlockRoot retrieves hashTreeRoot of ReadOnlyBeaconBlock/BeaconBlockHeader.
func (bs *Server) GetBlockRoot(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockRootResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlockRoot")
defer span.End()
var root []byte
var err error
switch string(req.BlockId) {
case "head":
root, err = bs.ChainInfoFetcher.HeadRoot(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head block: %v", err)
}
if root == nil {
return nil, status.Errorf(codes.NotFound, "No head root was found")
}
case "finalized":
finalized := bs.ChainInfoFetcher.FinalizedCheckpt()
root = finalized.Root
case "genesis":
blk, err := bs.BeaconDB.GenesisBlock(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return nil, status.Errorf(codes.NotFound, "Could not find genesis block: %v", err)
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Error(codes.Internal, "Could not hash genesis block")
}
root = blkRoot[:]
default:
if len(req.BlockId) == 32 {
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(req.BlockId))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve block for block root %#x: %v", req.BlockId, err)
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return nil, status.Errorf(codes.NotFound, "Could not find block: %v", err)
}
root = req.BlockId
} else {
slot, err := strconv.ParseUint(string(req.BlockId), 10, 64)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not parse block ID: %v", err)
}
hasRoots, roots, err := bs.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", slot, err)
}
if !hasRoots {
return nil, status.Error(codes.NotFound, "Could not find any blocks with given slot")
}
root = roots[0][:]
if len(roots) == 1 {
break
}
for _, blockRoot := range roots {
canonical, err := bs.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if block root is canonical: %v", err)
}
if canonical {
root = blockRoot[:]
break
}
}
}
}
b32Root := bytesutil.ToBytes32(root)
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
return &ethpbv1.BlockRootResponse{
Data: &ethpbv1.BlockRootContainer{
Root: root,
},
ExecutionOptimistic: isOptimistic,
Finalized: bs.FinalizationFetcher.IsFinalized(ctx, b32Root),
}, nil
}
// ListBlockAttestations retrieves the attestations included in the requested block.
func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv1.BlockAttestationsResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.ListBlockAttestations")

View File

@@ -585,7 +585,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "deneb")
md.Set(api.VersionHeader, "deneb")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NoError(t, err)
@@ -605,7 +605,7 @@ func TestServer_SubmitBlockSSZ(t *testing.T) {
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "deneb")
md.Set(api.VersionHeader, "deneb")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
@@ -964,173 +964,6 @@ func TestServer_GetBlockSSZV2(t *testing.T) {
})
}
func TestServer_GetBlockRoot(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
t.Run("get root", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
FinalizedRoots: map[[32]byte]bool{},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want []byte
wantErr bool
}{
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical slot",
blockID: []byte("30"),
want: blkContainers[30].BlockRoot,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.BlockRoot,
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].BlockRoot,
},
{
name: "genesis",
blockID: []byte("genesis"),
want: root[:],
},
{
name: "genesis root",
blockID: root[:],
want: root[:],
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].BlockRoot,
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "slot",
blockID: []byte("40"),
want: blkContainers[40].BlockRoot,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
assert.DeepEqual(t, tt.want, blockRootResp.Data.Root)
})
}
})
t.Run("execution optimistic", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{},
OptimisticRoots: map[[32]byte]bool{
bytesutil.ToBytes32(headBlock.BlockRoot): true,
},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
})
require.NoError(t, err)
assert.Equal(t, true, blockRootResp.ExecutionOptimistic)
})
t.Run("finalized", func(t *testing.T) {
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainFetcher := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
FinalizedRoots: map[[32]byte]bool{
bytesutil.ToBytes32(blkContainers[32].BlockRoot): true,
bytesutil.ToBytes32(blkContainers[64].BlockRoot): false,
},
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainFetcher,
HeadFetcher: mockChainFetcher,
OptimisticModeFetcher: mockChainFetcher,
FinalizationFetcher: mockChainFetcher,
}
t.Run("true", func(t *testing.T) {
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("32"),
})
require.NoError(t, err)
assert.Equal(t, true, blockRootResp.Finalized)
})
t.Run("false", func(t *testing.T) {
blockRootResp, err := bs.GetBlockRoot(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("64"),
})
require.NoError(t, err)
assert.Equal(t, false, blockRootResp.Finalized)
})
})
}
func TestServer_ListBlockAttestations(t *testing.T) {
ctx := context.Background()

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.