Compare commits

...

85 Commits

Author SHA1 Message Date
Kasey Kirkham
55dd05c8c0 testnet generate-genesis support for deneb 2023-06-15 14:43:10 -05:00
kasey
4f7db3dc67 BlobSidecarsByRoot (#12420)
* BlobSidecarsByRoot RPC handler

* BlobSidecarsByRange rpc handler (#12499)

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-06-15 19:38:08 +00:00
james-prysm
4ba782a815 introducing deneb changes and blobs to builder (#12477) 2023-06-14 13:04:48 -05:00
terencechain
ceb1ad3ed5 feat(config): update max blobs per block to 6 (#12512) 2023-06-12 12:43:19 -07:00
terencechain
8c142c45dc feat(blockchain): update payload attribute for deneb (#12509) 2023-06-12 12:43:19 -07:00
terencechain
28e4bd4fa0 feat(rpc/validator): stream and use deneb block (#12510) 2023-06-12 12:43:19 -07:00
terencechain
157e53ffc0 Update ProposeBeaconBlock Prysm RPC for Deneb (Non builder) (#12495) 2023-06-12 12:43:19 -07:00
terencechain
b31235ecaa Add data gas used field to execution payload (#12488) 2023-06-12 12:43:19 -07:00
terencechain
228d2f7968 Proposer RPC: GetBlock for Deneb (#12481) 2023-06-12 12:43:19 -07:00
terencechain
865365d2c2 Update block's commitment size (#12470) 2023-06-12 12:43:19 -07:00
terencechain
ade2f5e8e3 Deneb: implements engine API end points (#12384) 2023-06-12 12:43:19 -07:00
james-prysm
9b94e5d244 validator signing deneb (#12449)
* validator signing feature migrated from eip4844 branch, added unit tests

* Update proto/prysm/v1alpha1/beacon_block.proto

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-06-12 12:43:19 -07:00
terencechain
748f7e3665 Add Deneb upgrade function (#12433) 2023-06-12 12:43:19 -07:00
terencechain
21ad92eef8 P2P: broadcast blob (#12419) 2023-06-12 12:43:19 -07:00
terencechain
d541086f28 Add Blob Gossip (#12413) 2023-06-12 12:43:19 -07:00
Radosław Kapka
b3b1d3a4d0 Deneb integration: Beacon API getStateV2 (#12424)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Fix Migration Of State (#12423)
2023-06-12 12:43:19 -07:00
terencechain
92fd77e8c7 Deneb DB methods (#12379) 2023-06-12 12:43:19 -07:00
terencechain
ca1ab75b6f Add Deneb state (#12375) 2023-06-12 12:43:19 -07:00
terencechain
4e9de552e0 Add deneb block to consensus types (#12368) 2023-06-12 12:43:19 -07:00
terencechain
4edf1c20a7 Add deneb protobufs (#12363) 2023-06-12 12:43:19 -07:00
terencechain
2b410893a0 optimization: epoch boundary uses next slot cache (#12515)
* optimization: epoch boundary uses next slot cache

* test: fix

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-12 17:13:49 +00:00
Potuz
826267310e benchmark cold hashing of capella beaconstates (#12516)
* benchmark cold hashing of capella beaconstates

* use since
2023-06-12 16:54:43 +00:00
Nishant Das
d5057cfb42 Add the Ability for Prysm To Handle Trusted Peers (#12492)
* add all changes

* add to peers to watch

* add tests

* Update beacon-chain/p2p/peers/peerdata/store_test.go

* radek's review

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-06-12 14:47:52 +00:00
james-prysm
8d01cf2ec1 change update duties to handle all validators exited check (#12505)
* wip have update duties handle all validators updated

* removing function and adding tests

* removing unnecessary test

* fixing unit test

* gaz

* removing number on wait group

* trying lower threshold to reduce timeout

* testing removal of test to resolve timeout on buildkite

* gaz

* removing test that is breaking buildkite on timeouts, will need to return to re-evaluate the difference between buildkite and local mock

* addressing feedback

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-12 14:27:52 +00:00
Potuz
e4e315da94 log validation time for blocks (#12514) 2023-06-12 22:06:57 +08:00
terencechain
0a4e42545e Use next slot cache for sync committee (#12287)
* Use next slot cache for sync committee

* RWMutex

* change mutex for last cached state

* feat: change mutex

* test: add db

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-12 04:30:06 +00:00
kasey
6fa2d768b5 Checkpoint sync: get block using state.latest_block_header.slot (#12447)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-11 03:00:38 +00:00
Nishant Das
0f228896b0 Add Patch For Libp2p (#12507)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-11 01:59:18 +00:00
terencechain
6896b41963 optimization(proposer rpc): move htr to after broadcast (#12504) 2023-06-09 06:32:29 -07:00
Nishant Das
3bf6abe27c Ignore Phase0 Blocks For Monitor (#12503) 2023-06-09 05:00:36 +00:00
Nishant Das
c1391f0de3 Always Favour Yamux for Multiplexing (#12502) 2023-06-08 04:02:46 +00:00
james-prysm
6672d1499a prysmctl: output proposer settings (#12181)
* wip proposer settings

* WIP validator client APIs

* adding proposer settings output

* adding unit tests

* fixing linting

* fixing deepsource issues

* fixing e2e

* fixing deep source issue

* updating naming to not stutter

* updating bazel

* fixing linting error

* reverting comment

* adding builder settings

* gaz

* Update validator/client/validator.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* adding comments

* adding some tests

* gaz

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/errors.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/cmd.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/errors.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing feedback

* fixing unit test

* addressing comments

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-06-06 17:03:30 +00:00
Nishant Das
33cf52831c Update Libp2p to v0.27.5 (#12486)
* add deps

* update to v0.27.5 and handle panic
2023-06-06 08:41:15 +08:00
terencechain
d543e9be00 Update spec tests to v1.4.0-alpha.1 (#12489) 2023-06-03 11:17:13 +00:00
Nishant Das
0669050ffa Add Appropriate Size for the Attestation Queue (#12485)
* add tag

* fix off by 1
2023-06-02 11:33:28 +00:00
zghh
ceff0c2024 Fix the bug that returns 500 in /eth/v1/node/peers interface (#12483)
* Fix the bug that returns 500 in /eth/v1/node/peers interface

* Update node.go

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-06-02 03:27:17 +00:00
Radosław Kapka
c32b581e8e Add broadcast_validation to block publishing (#12432)
* day 1

* day 2

* day 2+

* day 3

* day 4

* making bazel happy

* PublishBlindedBlockV2

* remove file

* use lock in insertSeenProposerIndex

* remove EquivocationChecker interface

* update deps.bzl

* remove middleware json tags

* go mod tidy

* remove redundant return statements

* validate in handler

* improvements

* extract common code

* remove import

* sync test fix

* Update beacon-chain/rpc/eth/beacon/handlers.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-06-01 11:22:49 +00:00
terencechain
e516a2004f Update next slot cache correctly under late task (#12462) 2023-05-31 08:50:37 -07:00
terencechain
cb65d8af96 Proposer RPC: make setExecutionData better (#12466) 2023-05-31 06:06:32 -07:00
Nishant Das
70152bf476 Copy All Field Tries For Late Blocks (#12461)
* add new thing

* only have it for late blocks

* comments

* change to lock

* add test

* Update beacon-chain/state/state-native/state_test.go

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-05-30 09:57:20 +00:00
Radosław Kapka
8aa688729d Cleanup of ProposerPayloadIDsCache (#12474)
* Cleanup of `ProposerPayloadIDsCache`

* one more comment

* Update beacon-chain/cache/payload_id.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* Update beacon-chain/cache/payload_id.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-05-29 16:10:28 +00:00
Preston Van Loon
1ffc92999f p2p: Check peer threshold is met before giving up on ctx deadline (#12446)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-28 13:24:59 +00:00
terencechain
2dcef85f97 Add spec test for v1.4.0-alpha.0 (#12460)
* Fix spec test

* Fix sha

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-25 14:05:43 +00:00
Nishant Das
52da7b3de6 Release Lock Before Panicking (#12464) 2023-05-25 06:42:21 -07:00
terencechain
be16b64535 Remove SubmitBlindBlock context timeout (#12453) 2023-05-24 14:19:23 +00:00
terencechain
f4d3939b62 Add logs for build block times (#12452)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-05-24 13:37:26 +00:00
Nishant Das
245d8a29e0 Optimize Zerohash Comparisons In Forkchoice (#12458) 2023-05-24 09:58:02 +00:00
james-prysm
666188dfea Improve validator import logs (#12429)
* adding small ux improvement

* gaz

* rolling back dir test changes

* Update validator/accounts/accounts_import.go

* adding review suggestion

* missed else part of statement

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-05-23 15:41:41 -05:00
Preston Van Loon
cfa64ae013 Restore disable-peer-scorer flag (#12386)
* Revert "Make Peer Scorer Permanent Default (#12138)"

This reverts commit 4d28d69fd9.

* make peer scoring flag warning scary
2023-05-23 13:53:02 +00:00
Potuz
cd0f814f2e fixed erroneous panic (#12450) 2023-05-23 11:12:31 +00:00
Radosław Kapka
abc81e6dde Merge all block unblinding code into a single unblinder struct (#12240)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-05-23 11:38:52 +02:00
terencechain
6b26183e73 Add missing config yamls for domains (#12442)
* Add missing config yamls for domains

* Fix GetSpec test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 18:02:41 +00:00
Preston Van Loon
7fe935e94d Fix metric name from PR #12430 (#12445)
* Fix metric name from PR #12430

* @potuz can't spell 'unknown'

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 17:43:21 +00:00
Potuz
e0e7c71eb5 Fix sandwich attack on honest reorgs (#12418)
* Fix sandwich attack on honest reorgs

* fix test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 17:23:20 +00:00
Potuz
c80019bd0b remove trivial helper (#12443) 2023-05-22 15:59:16 +00:00
james-prysm
8dfb92c605 reverting expiration logic on validator while using --enable-registration-cache (#12436)
* reverting expiration logic

* gaz
2023-05-22 14:54:09 +00:00
Potuz
9d192a3608 Remove unused function (#12439)
* Remove unused function

* gazelle
2023-05-22 11:09:08 -03:00
Nishant Das
51bde7a845 disable it (#12438) 2023-05-22 19:18:13 +08:00
kasey
385a317902 Revert initsync revert (#12431)
* Revert "Revert "BeaconBlocksByRange and BlobSidecarsByRange consistency (#123… (#12426)"

This reverts commit ddc1e48e05.

* fix metrics bug, add batch.next tests

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-19 16:59:13 +00:00
Potuz
b84dd40ba9 Use forkchoice to validate sync messages faster (#12430)
* Use forkchoice to validate sync messages faster

* add metric
2023-05-19 14:47:39 +00:00
kasey
aeaa72fdc2 fix deadlock between monitor service and init-sync (#12427)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-18 18:35:06 +00:00
kasey
ddc1e48e05 Revert "BeaconBlocksByRange and BlobSidecarsByRange consistency (#123… (#12426)
This reverts commit 73e4bdccbb.

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-18 18:01:26 +00:00
Nishant Das
f91159337b Fix Migration Of State (#12423) 2023-05-18 13:18:56 +00:00
Potuz
537236e1c9 Add aggregation metrics (#12417) 2023-05-17 12:18:59 -07:00
kasey
73e4bdccbb BeaconBlocksByRange and BlobSidecarsByRange consistency (#12383)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-17 12:16:10 +00:00
Potuz
f54bd64bdd Default aggregation ticker times (#12412)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-16 19:38:44 +00:00
james-prysm
d907cae595 persistent validator settings in validator client (#12354)
* WIP

* improving proposer settings store

* WIP persistent validator settings

* WIP persistent validator settings

* changing visibility level

* fixing some deepsource issues

* fixing more deepsource issues

* fixing json marshalling

* fix linting

* fixing tests

* fixing more tests

* fixing more tests

* fixing more tests

* fixing linting

* WIP fixing unit tests

* fixing remaining db tests

* converting json to protobuf

* fixing e2e

* k8s yaml library is used directly

* fixing linting

* fixing broken unit test

* reverting changes on e2e

* fixing linting

* fixing deepsource issue

* resolving some internal comments

* resolving some comments and adding more tests

* adding more unit tests

* gaz

* fixing flaking test

* fixing unit test contamination

* fixing deepsource issue

* resolving review item

* gaz
2023-05-16 14:08:49 -05:00
Potuz
be23773924 Don't use max cover on unaggregated atts nor check subgroup of validated signatures (#12350)
* Don't use max cover on unaggregated atts

* Do not validate signature on the attestation package

* separate nil error checks

* fix unit tests
2023-05-16 17:06:26 +00:00
terencechain
29f6de1e96 Flip build block parallel feature flag (#12408) 2023-05-16 08:43:15 -07:00
Potuz
955a21fea4 revert revert of f6764fe62b (#12399) 2023-05-16 11:50:02 +00:00
Preston Van Loon
b4f1fea029 CI: fix docker image tagging (#12407) 2023-05-16 02:10:23 +00:00
kasey
f1b88d005d fix broken slasher service init (#12405)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-15 17:00:29 -05:00
Preston Van Loon
ee612d958a Update discord invite (#12403) 2023-05-15 13:54:19 +00:00
Nishant Das
09e22538f9 Support Capella Blocks for Tool (#12402)
* fix it

* fix it
2023-05-15 17:59:02 +08:00
terencechain
3b9e974a45 Add epoch and root to not a checkpt in forkchoice log (#12400)
* Add epoch number and root in not a checkpt in forkchoice log

* Update beacon-chain/blockchain/process_attestation_helpers.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* Fix test

* Fix typo

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-05-14 06:02:36 +00:00
Potuz
ad749a40b6 Save to checkpoint cache if the nsc hits (#12398)
* Save to checkpoint cache if the nsc hits

Also move the head check before the checkpoint cache check

* add unit test
2023-05-13 09:54:33 -03:00
terencechain
9b13454457 Metrics: Invert too late and too early att received count (#12392)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 19:45:43 +00:00
Potuz
b9917807d8 Ignore some untimely attestations (#12387)
* Ignore some untimely attestations

* correct child slot check

* consider tips as viable for checkpoints

* deal with canonical blocks

* forkchoice unit tests

* blockchain readonly beacon state

* Ignore some untimely attestations

* correct child slot check

* consider tips as viable for checkpoints

* deal with canonical blocks

* forkchoice unit tests

* blockchain readonly beacon state

* Fix AttestationTargetState mock

* Fix ineffectual assignment lint

* Fix blockchain tests

* Fix build

* Add Nil check

* add comment on lock

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 16:55:33 +00:00
Raul Jordan
e5c9387cd9 Update Github Actions Go Version (#12391)
* update github actions

* use quotes or it is go 1.2 

lol

* Update gosec

* Update gosec

* Update go lint

* fix gosec violations

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 15:51:20 +00:00
Preston Van Loon
2c3b3b802a Revert "Add a new slot ticker and use it on attestation aggregation" (#12390)
This reverts commit f6764fe62b.
2023-05-12 14:49:37 +00:00
Simon
3ef3e1d13b fix-subnets-oom (#12388)
fix-subnets-oom, close iterator after using it
2023-05-12 07:52:17 -05:00
Nick Sullivan
5c00fcb84f Fix numerous spelling errors and typos in the log messages, comments, and documentation. (#12385)
* Minor typos and spelling fixes (comments, logs, & docs only, no code changes)

* Fix spelling in log message

* Additional spelling tweaks based on review from @prestonvanloon
2023-05-11 20:45:43 +00:00
Nishant Das
aef22bf54e Add In Support For Builder in E2E (#12343)
* fix it up

* add gaz

* add changes in

* finally runs

* fix it

* add progress

* add capella support

* save progress

* remove debug logs

* cleanup

* remove log

* fix flag

* remove unused lock

* gaz

* change

* fix

* lint

* james review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-05-11 11:10:29 -05:00
Potuz
f6764fe62b Add a new slot ticker and use it on attestation aggregation (#12377)
* Add slot ticker with intervals

* add flags for aggregation duration

* misspelling

* hide flags

* fix flags and default durations

* lint

* wait for initial sync

* deep source

* add log

* Preston's review

* fix error message

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-10 12:48:51 +00:00
Preston Van Loon
07db0dc448 CI: Add support for buildbuddy uploads (#12378)
* Add build metadata

* Add buildbuddy flags

* more metadata

* fix latest tag

* fix branch

* revert branch change

* touch a file to trigger build

* remove unknown command

* fix script

* Update latest_version_tag.sh
2023-05-10 12:30:40 +00:00
445 changed files with 34332 additions and 6557 deletions

View File

@@ -43,4 +43,12 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh
# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io

View File

@@ -26,14 +26,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.50.1
version: v1.52.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20'
id: go
- name: Check out code into the Go module directory

View File

@@ -4,14 +4,14 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).
### Staking on Mainnet

View File

@@ -205,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.3.0"
consensus_spec_version = "v1.4.0-alpha.2"
bls_test_version = "v0.1.1"
@@ -221,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
sha256 = "bfba887cbe043907adf884cf6d18f2e8a31e34e9245397b84af1f54ed22b706a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -237,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
sha256 = "9ff77bef0ca1e39bcee2769075c89f0f91fb8f89ad38a1b3e0c31cf6732650ad",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -253,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
sha256 = "fbcc3c9898110c675e5de9c27cb667ad7cadf930db7ebb5c6bba15d7be95bf8a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -268,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
sha256 = "9fff1bcdd0e5857797197800db091c3675b2c11b54f704fe4de1ba683bed7ba5",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

api/client/BUILD.bazel (new file, 20 lines)
View File

@@ -0,0 +1,20 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"errors.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -6,11 +6,11 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -39,6 +39,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -6,10 +6,12 @@ import (
"path"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -18,6 +20,8 @@ import (
"golang.org/x/mod/semver"
)
var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
@@ -74,37 +78,40 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
if s.Slot() != s.LatestBlockHeader().Slot {
return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
}
sr, err := s.HashTreeRoot(ctx)
slot := s.LatestBlockHeader().Slot
bb, err := client.GetBlock(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
header := s.LatestBlockHeader()
header.StateRoot = sr[:]
br, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := b.Block().HashTreeRoot()
br, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
bodyRoot, err := b.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
}
log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
if sbr != bodyRoot {
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", sr).
WithField("block_root", br).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
b: b,
@@ -140,7 +147,7 @@ func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*We
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
if !errors.Is(err, base.ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
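
The checkpoint.go change above replaces the old fetch-by-computed-root flow with a fetch by the header's slot plus a consistency check: the finalized state's latest_block_header.body_root must match the hash_tree_root of the body of the block that was downloaded. Below is a minimal sketch of that check using hypothetical stand-in types (latestBlockHeader, block, a fetch callback) rather than Prysm's actual state and block interfaces; only the comparison logic mirrors the diff.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the pieces of state and block used by the check.
type latestBlockHeader struct {
	Slot     uint64
	BodyRoot [32]byte
}

type block struct {
	Slot     uint64
	BodyRoot [32]byte // hash_tree_root of the block body
}

var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")

// verifyCheckpointOrigin mirrors the idea in DownloadFinalizedData: fetch the
// block for header.Slot, then require the body roots to agree before using the
// state/block pair as a checkpoint-sync origin.
func verifyCheckpointOrigin(header latestBlockHeader, fetch func(slot uint64) (block, error)) (block, error) {
	b, err := fetch(header.Slot)
	if err != nil {
		return block{}, fmt.Errorf("error requesting block by slot = %d: %w", header.Slot, err)
	}
	if !bytes.Equal(header.BodyRoot[:], b.BodyRoot[:]) {
		return block{}, fmt.Errorf("state body root = %#x, block body root = %#x: %w",
			header.BodyRoot, b.BodyRoot, errCheckpointBlockMismatch)
	}
	return b, nil
}

func main() {
	root := [32]byte{0xaa}
	hdr := latestBlockHeader{Slot: 1234, BodyRoot: root}
	fetch := func(slot uint64) (block, error) { return block{Slot: slot, BodyRoot: root}, nil }
	if _, err := verifyCheckpointOrigin(hdr, fetch); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("checkpoint state and block are consistent")
}
```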

View File

@@ -7,9 +7,9 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
@@ -66,11 +66,7 @@ func TestMarshalToEnvelope(t *testing.T) {
}
func TestFallbackVersionCheck(t *testing.T) {
c := &Client{
hc: &http.Client{},
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
@@ -88,12 +84,13 @@ func TestFallbackVersionCheck(t *testing.T) {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
@@ -170,44 +167,41 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
Epoch: epoch,
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -266,42 +260,39 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -315,21 +306,16 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == renderGetStatePath(IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
@@ -448,29 +434,24 @@ func TestDownloadFinalizedData(t *testing.T) {
ms, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)

View File

@@ -5,8 +5,6 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -14,8 +12,8 @@ import (
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
@@ -54,8 +52,6 @@ const (
IdFinalized StateOrBlockId = "finalized"
)
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
@@ -85,96 +81,22 @@ func idTemplate(ts string) func(StateOrBlockId) string {
return f
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
baseURL *url.URL
*client.Client
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
// NewClient returns a new Client that includes functions for rest calls to Beacon API.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: fmt.Sprintf("%s:%s", host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
return &Client{c}, nil
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
@@ -184,7 +106,7 @@ func renderGetBlockPath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.get(ctx, blockPath, withSSZEncoding())
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
@@ -199,7 +121,7 @@ var getBlockRootTpl = idTemplate(getBlockRootPath)
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.get(ctx, rootPath)
b, err := c.Get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
@@ -222,7 +144,7 @@ var getForkTpl = idTemplate(getForkForStatePath)
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.get(ctx, getForkTpl(stateId))
body, err := c.Get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
@@ -238,7 +160,7 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.get(ctx, getForkSchedulePath)
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
@@ -256,7 +178,7 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
body, err := c.get(ctx, getConfigSpecPath)
body, err := c.Get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
@@ -279,7 +201,7 @@ var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
return nil, errors.Wrapf(client.ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
@@ -291,7 +213,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to a HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.get(ctx, getNodeVersionPath)
b, err := c.Get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
@@ -318,7 +240,7 @@ func renderGetStatePath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.get(ctx, statePath, withSSZEncoding())
b, err := c.Get(ctx, statePath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
@@ -331,7 +253,7 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
// - finds the highest non-skipped block preceding the epoch
// - returns the htr of the found block and returns this + the value of state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.get(ctx, getWeakSubjectivityPath)
body, err := c.Get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
@@ -362,7 +284,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
@@ -372,7 +294,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.hc.Do(req)
resp, err := c.Do(req)
if err != nil {
return err
}
@@ -401,7 +323,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.get(ctx, changeBLStoExecutionPath)
body, err := c.Get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
@@ -413,23 +335,6 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
return poolResponse, nil
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`
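
Taken together, these client.go changes fold the beacon package's bespoke HTTP plumbing (urlForHost, get, withSSZEncoding, non200Err) into the shared api/client base client, with the beacon Client now embedding client.Client. Below is a hedged usage sketch of the resulting call pattern, using only names that appear in the diff (NewClient, WithRoundTripper, GetState, IdFinalized); it assumes WithRoundTripper accepts a standard http.RoundTripper, as the test fakes suggest, and the host URL is illustrative.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

func main() {
	ctx := context.Background()

	// The beacon client is now constructed with the shared functional options
	// from api/client; the tests above inject a fake round tripper the same way.
	c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(http.DefaultTransport))
	if err != nil {
		log.Fatal(err)
	}

	// GetState delegates to the embedded base client's Get with
	// client.WithSSZEncoding and returns the ssz-encoded state bytes.
	stateSSZ, err := c.GetState(ctx, beacon.IdFinalized)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes of finalized state", len(stateSSZ))
}
```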

View File

@@ -4,6 +4,7 @@ import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -17,17 +18,17 @@ func TestParseNodeVersion(t *testing.T) {
{
name: "empty string",
v: "",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "complete version",
@@ -91,7 +92,7 @@ func TestValidHostname(t *testing.T) {
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
err: client.ErrMalformedHostname,
},
{
name: "hostname with port",
@@ -132,7 +133,7 @@ func TestValidHostname(t *testing.T) {
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.baseURL.ResolveReference(&url.URL{Path: c.path}).String())
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}

View File

@@ -1,13 +0,0 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
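
With this errors.go deleted, the sentinels (ErrNotOK, ErrNotFound, ErrInvalidNodeVersion) live in the shared api/client package instead, which is why checkpoint.go now checks base.ErrNotOK and the tests reference client.ErrInvalidNodeVersion and client.ErrMalformedHostname. A small sketch of the resulting caller-side check; the wrapped error below is an illustrative stand-in for what the shared client produces on a non-2xx response.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	// Illustrative stand-in for an error returned by the shared client when a
	// request comes back with a non-2xx status code.
	err := fmt.Errorf("code=500, url=/eth/v1/node/version: %w", client.ErrNotOK)

	if errors.Is(err, client.ErrNotOK) {
		fmt.Println("request failed with a non-2xx response:", err)
	}
}
```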

View File

@@ -11,12 +11,12 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
@@ -40,6 +40,7 @@ go_test(
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -1,15 +1,12 @@
package builder
import (
"math/big"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -25,6 +22,7 @@ type SignedBid interface {
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error)
Value() []byte
Pubkey() []byte
Version() int
@@ -117,6 +115,11 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// BlindedBlobsBundle --
func (b builderBid) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
@@ -162,12 +165,13 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlindedBlobsBundle --
func (b builderBidCapella) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return nil, errors.New("blinded blobs bundle not available before Deneb")
}
// Version --
@@ -199,3 +203,90 @@ func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
type builderBidDeneb struct {
p *ethpb.BuilderBidDeneb
}
// WrappedBuilderBidDeneb is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidDeneb(p *ethpb.BuilderBidDeneb) (Bid, error) {
w := builderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Version --
func (b builderBidDeneb) Version() int {
return version.Deneb
}
// Value --
func (b builderBidDeneb) Value() []byte {
return b.p.Value
}
// Pubkey --
func (b builderBidDeneb) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidDeneb) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidDeneb) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToGwei(b.p.Value))
}
// BlindedBlobsBundle --
func (b builderBidDeneb) BlindedBlobsBundle() (*enginev1.BlindedBlobsBundle, error) {
return b.p.BlindedBlobsBundle, nil
}
type signedBuilderBidDeneb struct {
p *ethpb.SignedBuilderBidDeneb
}
// WrappedSignedBuilderBidDeneb is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidDeneb(p *ethpb.SignedBuilderBidDeneb) (SignedBid, error) {
w := signedBuilderBidDeneb{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidDeneb) Message() (Bid, error) {
return WrappedBuilderBidDeneb(b.p.Message)
}
// Signature --
func (b signedBuilderBidDeneb) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidDeneb) Version() int {
return version.Deneb
}
// IsNil --
func (b signedBuilderBidDeneb) IsNil() bool {
return b.p == nil
}
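
The bid.go additions above introduce Deneb wrappers alongside the existing Bellatrix and Capella ones; Deneb is the first version whose BlindedBlobsBundle returns data rather than an error. Below is a hedged usage sketch assuming only the constructors and interface methods shown in the diff; the empty protobuf bid is purely illustrative (a real one would come from a relay's GetHeader response).

```go
package main

import (
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

func main() {
	// Assume `signed` was decoded from a builder GetHeader response.
	signed := &ethpb.SignedBuilderBidDeneb{Message: &ethpb.BuilderBidDeneb{}}

	sb, err := builder.WrappedSignedBuilderBidDeneb(signed)
	if err != nil {
		log.Fatal(err) // ErrNilObjectWrapped if the protobuf is nil
	}
	bid, err := sb.Message()
	if err != nil {
		log.Fatal(err)
	}

	// Pre-Deneb bids return an error here; Deneb bids expose the bundle field.
	bundle, err := bid.BlindedBlobsBundle()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bid version=%d, blinded blobs bundle present=%v", bid.Version(), bundle != nil)
}
```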

View File

@@ -11,7 +11,6 @@ import (
"net/url"
"strings"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -20,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
log "github.com/sirupsen/logrus"
@@ -36,7 +36,6 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -88,7 +87,7 @@ type BuilderClient interface {
NodeURL() string
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
Status(ctx context.Context) error
}
@@ -222,6 +221,16 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Deneb)):
hr := &ExecHeaderResponseDeneb{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidDeneb(p)
case strings.ToLower(version.String(version.Capella)):
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
@@ -276,79 +285,115 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
// The response is the full execution payload used to create the blinded block.
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
if !sb.IsBlinded() {
return nil, errNotBlinded
return nil, nil, errNotBlinded
}
switch sb.Version() {
case version.Bellatrix:
psb, err := sb.PbBlindedBellatrixBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
}
ep := &ExecPayloadResponse{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
}
if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
return nil, errors.New("not a bellatrix payload")
return nil, nil, errors.New("not a bellatrix payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayload(p)
payload, err := blocks.WrappedExecutionPayload(p)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Capella:
psb, err := sb.PbBlindedCapellaBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get protobuf block")
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
body, err := json.Marshal(b)
if err != nil {
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
}
ep := &ExecPayloadResponseCapella{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
}
if strings.ToLower(ep.Version) != version.String(version.Capella) {
return nil, errors.New("not a capella payload")
return nil, nil, errors.New("not a capella payload")
}
p, err := ep.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from payload")
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
return blocks.WrappedExecutionPayloadCapella(p, 0)
payload, err := blocks.WrappedExecutionPayloadCapella(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, nil, nil
case version.Deneb:
psb, err := sb.PbBlindedDenebBlock()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not get protobuf block")
}
b := &ethpb.SignedBlindedBeaconBlockAndBlobsDeneb{Block: psb, Blobs: blobs}
body, err := json.Marshal(b)
if err != nil {
return nil, nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockDeneb value body in SubmitBlindedBlockDeneb")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Deneb))
}
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
if err != nil {
return nil, nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockDeneb to the builder api")
}
ep := &ExecPayloadResponseDeneb{}
if err := json.Unmarshal(rb, ep); err != nil {
return nil, nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockDeneb response")
}
if strings.ToLower(ep.Version) != version.String(version.Deneb) {
return nil, nil, errors.New("not a deneb payload")
}
p, blobBundle, err := ep.ToProto()
if err != nil {
return nil, nil, errors.Wrapf(err, "could not extract proto message from payload")
}
payload, err := blocks.WrappedExecutionPayloadDeneb(p, 0)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not wrap execution payload in interface")
}
return payload, blobBundle, nil
default:
return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
return nil, nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
}
}
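A hedged caller-side sketch (not part of the diff) showing the new three-value SubmitBlindedBlock signature in use for Deneb; the block and sidecars are assumed to be produced elsewhere in the proposer flow.
func exampleSubmitDeneb(ctx context.Context, c *Client, sb interfaces.ReadOnlySignedBeaconBlock, sidecars []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
    payload, bundle, err := c.SubmitBlindedBlock(ctx, sb, sidecars)
    if err != nil {
        return nil, nil, errors.Wrap(err, "builder did not reveal the payload")
    }
    // Pre-Deneb blocks come back without a blobs bundle, so bundle may be nil.
    if bundle == nil {
        log.Debug("no blobs bundle in builder response")
    }
    return payload, bundle, nil
}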

View File

@@ -12,7 +12,9 @@ import (
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -125,7 +127,6 @@ func TestClient_GetHeader(t *testing.T) {
var slot types.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -236,6 +237,52 @@ func TestClient_GetHeader(t *testing.T) {
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
bundle, err := bid.BlindedBlobsBundle()
require.NoError(t, err)
require.Equal(t, len(bundle.BlobRoots) <= fieldparams.MaxBlobsPerBlock && len(bundle.BlobRoots) > 0, true)
for i := range bundle.BlobRoots {
require.Equal(t, len(bundle.BlobRoots[i]) == fieldparams.RootLength, true)
}
require.Equal(t, len(bundle.KzgCommitments) > 0, true)
for i := range bundle.KzgCommitments {
require.Equal(t, len(bundle.KzgCommitments[i]) == 48, true)
}
require.Equal(t, len(bundle.Proofs) > 0, true)
for i := range bundle.Proofs {
require.Equal(t, len(bundle.Proofs[i]) == 48, true)
}
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -277,7 +324,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb, nil)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
@@ -303,7 +350,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
require.NoError(t, err)
ep, err := c.SubmitBlindedBlock(ctx, sbb)
ep, _, err := c.SubmitBlindedBlock(ctx, sbb, nil)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
@@ -313,6 +360,40 @@ func TestSubmitBlindedBlock(t *testing.T) {
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
test := testSignedBlindedBeaconBlockAndBlobsDeneb(t)
sbb, err := blocks.NewSignedBeaconBlock(test.Block)
require.NoError(t, err)
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb, test.Blobs)
require.NoError(t, err)
withdrawals, err := ep.Withdrawals()
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
require.NotNil(t, blobBundle)
require.Equal(t, hexutil.Encode(blobBundle.Blobs[0]), hexutil.Encode(make([]byte, fieldparams.BlobLength)))
require.Equal(t, hexutil.Encode(blobBundle.KzgCommitments[0]), "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
require.Equal(t, hexutil.Encode(blobBundle.Proofs[0]), "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
})
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -330,13 +411,13 @@ func TestSubmitBlindedBlock(t *testing.T) {
}
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
require.NoError(t, err)
_, err = c.SubmitBlindedBlock(ctx, sbbb)
_, _, err = c.SubmitBlindedBlock(ctx, sbbb, nil)
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
require.NoError(t, err)
_, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb, nil)
require.ErrorIs(t, err, errNotBlinded)
})
}
@@ -626,6 +707,168 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
}
}
func testSignedBlindedBeaconBlockAndBlobsDeneb(t *testing.T) *eth.SignedBlindedBeaconBlockAndBlobsDeneb {
return &eth.SignedBlindedBeaconBlockAndBlobsDeneb{
Block: &eth.SignedBlindedBeaconBlockDeneb{
Block: &eth.BlindedBeaconBlockDeneb{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Body: &eth.BlindedBeaconBlockBodyDeneb{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
Eth1Data: &eth.Eth1Data{
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DepositCount: 1,
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
ProposerSlashings: []*eth.ProposerSlashing{
{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
AttesterSlashings: []*eth.AttesterSlashing{
{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{1},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
Attestations: []*eth.Attestation{
{
AggregationBits: bitfield.Bitlist{0x01},
Data: &eth.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Source: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
Target: &eth.Checkpoint{
Epoch: 1,
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
Deposits: []*eth.Deposit{
{
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
Data: &eth.Deposit_Data{
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Amount: 1,
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
},
VoluntaryExits: []*eth.SignedVoluntaryExit{
{
Exit: &eth.VoluntaryExit{
Epoch: 1,
ValidatorIndex: 1,
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeSignature: make([]byte, 48),
SyncCommitteeBits: bitfield.Bitvector512{0x01},
},
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DataGasUsed: 1,
ExcessDataGas: 1,
},
},
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
Blobs: []*eth.SignedBlindedBlobSidecar{
{
Message: &eth.BlindedBlobSidecar{
BlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
Index: 0,
Slot: 1,
BlockParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ProposerIndex: 1,
BlobRoot: ezDecode(t, "0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"),
KzgCommitment: ezDecode(t, "0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"),
KzgProof: ezDecode(t, "0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
},
},
}
}
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)

View File

@@ -10,6 +10,7 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -40,8 +41,8 @@ func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedVali
}
// SubmitBlindedBlock --
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
return nil, nil
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
return nil, nil, nil
}
// Status --

View File

@@ -8,6 +8,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
@@ -346,6 +347,74 @@ func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
}, nil
}
// FromProto converts a proto execution payload type to our builder
// compatible payload type.
func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayload{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
return ExecutionPayload{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
}, nil
}
// FromProtoCapella converts a proto execution payload type for capella to our
// builder compatible payload type.
func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayloadCapella{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = payload.Transactions[i]
}
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
for i, w := range payload.Withdrawals {
withdrawals[i] = Withdrawal{
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
Address: w.Address,
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
}
}
return ExecutionPayloadCapella{
ParentHash: payload.ParentHash,
FeeRecipient: payload.FeeRecipient,
StateRoot: payload.StateRoot,
ReceiptsRoot: payload.ReceiptsRoot,
LogsBloom: payload.LogsBloom,
PrevRandao: payload.PrevRandao,
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: payload.ExtraData,
BaseFeePerGas: bFee,
BlockHash: payload.BlockHash,
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
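A short sketch of the reverse conversion added above, assuming the payload proto came from the execution layer and that this file's json import is available; the helper name is illustrative.
func examplePayloadToBuilderJSON(p *v1.ExecutionPayloadCapella) ([]byte, error) {
    ep, err := FromProtoCapella(p)
    if err != nil {
        return nil, err
    }
    // The builder-compatible struct carries the hex/string encodings expected
    // by the relay API, so a plain json.Marshal yields the wire format.
    return json.Marshal(ep)
}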
type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
@@ -956,6 +1025,296 @@ func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
})
}
// ExecHeaderResponseDeneb is the header response from the builder API endpoint /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseDeneb struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidDeneb `json:"message"`
} `json:"data"`
}
// ToProto creates a SignedBuilderBidDeneb Proto from ExecHeaderResponseDeneb.
func (ehr *ExecHeaderResponseDeneb) ToProto() (*eth.SignedBuilderBidDeneb, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBidDeneb{
Message: bb,
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
}, nil
}
// ToProto creates a BuilderBidDeneb Proto from BuilderBidDeneb.
func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
bundle, err := bb.BlindedBlobsBundle.ToProto()
if err != nil {
return nil, err
}
return &eth.BuilderBidDeneb{
Header: header,
BlindedBlobsBundle: bundle,
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
// BuilderBidDeneb is a field of ExecHeaderResponseDeneb.
type BuilderBidDeneb struct {
Header *ExecutionPayloadHeaderDeneb `json:"header"`
BlindedBlobsBundle *BlindedBlobsBundle `json:"blinded_blobs_bundle"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// BlindedBlobsBundle is a field of BuilderBidDeneb and represents the blinded blobs of the associated header.
type BlindedBlobsBundle struct {
KzgCommitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
BlobRoots []hexutil.Bytes `json:"blob_roots"`
}
// ToProto creates a BlindedBlobsBundle Proto from BlindedBlobsBundle.
func (r *BlindedBlobsBundle) ToProto() (*v1.BlindedBlobsBundle, error) {
kzg := make([][]byte, len(r.KzgCommitments))
for i := range kzg {
kzg[i] = bytesutil.SafeCopyBytes(r.KzgCommitments[i])
}
proofs := make([][]byte, len(r.Proofs))
for i := range proofs {
proofs[i] = bytesutil.SafeCopyBytes(r.Proofs[i])
}
blobRoots := make([][]byte, len(r.BlobRoots))
for i := range blobRoots {
blobRoots[i] = bytesutil.SafeCopyBytes(r.BlobRoots[i])
}
return &v1.BlindedBlobsBundle{
KzgCommitments: kzg,
Proofs: proofs,
BlobRoots: blobRoots,
}, nil
}
// ExecutionPayloadHeaderDeneb is the execution payload header field of BuilderBidDeneb.
type ExecutionPayloadHeaderDeneb struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
WithdrawalsRoot hexutil.Bytes `json:"withdrawals_root"`
DataGasUsed Uint64String `json:"data_gas_used"` // new in deneb
ExcessDataGas Uint64String `json:"excess_data_gas"` // new in deneb
*v1.ExecutionPayloadHeaderDeneb
}
// MarshalJSON returns a JSON byte array representing the ExecutionPayloadHeaderDeneb struct.
func (h *ExecutionPayloadHeaderDeneb) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeaderDeneb
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderDeneb.BaseFeePerGas)
if err != nil {
return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
}
return json.Marshal(&MarshalCaller{
ParentHash: h.ExecutionPayloadHeaderDeneb.ParentHash,
FeeRecipient: h.ExecutionPayloadHeaderDeneb.FeeRecipient,
StateRoot: h.ExecutionPayloadHeaderDeneb.StateRoot,
ReceiptsRoot: h.ExecutionPayloadHeaderDeneb.ReceiptsRoot,
LogsBloom: h.ExecutionPayloadHeaderDeneb.LogsBloom,
PrevRandao: h.ExecutionPayloadHeaderDeneb.PrevRandao,
BlockNumber: Uint64String(h.ExecutionPayloadHeaderDeneb.BlockNumber),
GasLimit: Uint64String(h.ExecutionPayloadHeaderDeneb.GasLimit),
GasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.GasUsed),
Timestamp: Uint64String(h.ExecutionPayloadHeaderDeneb.Timestamp),
ExtraData: h.ExecutionPayloadHeaderDeneb.ExtraData,
BaseFeePerGas: baseFeePerGas,
BlockHash: h.ExecutionPayloadHeaderDeneb.BlockHash,
TransactionsRoot: h.ExecutionPayloadHeaderDeneb.TransactionsRoot,
WithdrawalsRoot: h.ExecutionPayloadHeaderDeneb.WithdrawalsRoot,
DataGasUsed: Uint64String(h.ExecutionPayloadHeaderDeneb.DataGasUsed),
ExcessDataGas: Uint64String(h.ExecutionPayloadHeaderDeneb.ExcessDataGas),
})
}
// UnmarshalJSON takes in a byte array and unmarshals the value into ExecutionPayloadHeaderDeneb.
func (h *ExecutionPayloadHeaderDeneb) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeaderDeneb
uc := &UnmarshalCaller{}
if err := json.Unmarshal(b, uc); err != nil {
return err
}
ep := ExecutionPayloadHeaderDeneb(*uc)
*h = ep
var err error
h.ExecutionPayloadHeaderDeneb, err = h.ToProto()
return err
}
// ToProto returns an ExecutionPayloadHeaderDeneb Proto object.
func (h *ExecutionPayloadHeaderDeneb) ToProto() (*v1.ExecutionPayloadHeaderDeneb, error) {
return &v1.ExecutionPayloadHeaderDeneb{
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
WithdrawalsRoot: bytesutil.SafeCopyBytes(h.WithdrawalsRoot),
DataGasUsed: uint64(h.DataGasUsed),
ExcessDataGas: uint64(h.ExcessDataGas),
}, nil
}
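A minimal round-trip sketch for the Deneb header codec above; the proto header is assumed to be fully populated, and UnmarshalJSON repopulates the embedded proto via ToProto.
func exampleHeaderRoundTrip(ph *v1.ExecutionPayloadHeaderDeneb) (*v1.ExecutionPayloadHeaderDeneb, error) {
    b, err := json.Marshal(&ExecutionPayloadHeaderDeneb{ExecutionPayloadHeaderDeneb: ph})
    if err != nil {
        return nil, err
    }
    decoded := &ExecutionPayloadHeaderDeneb{}
    if err := json.Unmarshal(b, decoded); err != nil {
        return nil, err
    }
    return decoded.ExecutionPayloadHeaderDeneb, nil
}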
// ExecPayloadResponseDeneb is the response to the builder API /eth/v1/builder/blinded_blocks request. It includes the version, the execution payload object, and the blobs bundle object.
type ExecPayloadResponseDeneb struct {
Version string `json:"version"`
Data *ExecutionPayloadDenebAndBlobsBundle `json:"data"`
}
// ExecutionPayloadDenebAndBlobsBundle is the data field used in ExecPayloadResponseDeneb.
type ExecutionPayloadDenebAndBlobsBundle struct {
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
BlobsBundle *BlobsBundle `json:"blobs_bundle"`
}
// ExecutionPayloadDeneb is a field used in ExecutionPayloadDenebAndBlobsBundle.
type ExecutionPayloadDeneb struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
Withdrawals []Withdrawal `json:"withdrawals"`
DataGasUsed Uint64String `json:"data_gas_used"` // new in deneb
ExcessDataGas Uint64String `json:"excess_data_gas"` // new in deneb
}
// BlobsBundle is a field in ExecutionPayloadDenebAndBlobsBundle.
type BlobsBundle struct {
Commitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
Blobs []hexutil.Bytes `json:"blobs"`
}
// ToProto returns a BlobsBundle Proto.
func (b BlobsBundle) ToProto() (*v1.BlobsBundle, error) {
commitments := make([][]byte, len(b.Commitments))
for i := range b.Commitments {
if len(b.Commitments[i]) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("commitment length %d is not %d", len(b.Commitments[i]), fieldparams.BLSPubkeyLength)
}
commitments[i] = bytesutil.SafeCopyBytes(b.Commitments[i])
}
proofs := make([][]byte, len(b.Proofs))
for i := range b.Proofs {
if len(b.Proofs[i]) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("proof length %d is not %d", len(b.Proofs[i]), fieldparams.BLSPubkeyLength)
}
proofs[i] = bytesutil.SafeCopyBytes(b.Proofs[i])
}
if len(b.Blobs) > fieldparams.MaxBlobsPerBlock {
return nil, fmt.Errorf("blobs length %d is more than max %d", len(b.Blobs), fieldparams.MaxBlobsPerBlock)
}
blobs := make([][]byte, len(b.Blobs))
for i := range b.Blobs {
if len(b.Blobs[i]) != fieldparams.BlobLength {
return nil, fmt.Errorf("blob length %d is not %d", len(b.Blobs[i]), fieldparams.BlobLength)
}
blobs[i] = bytesutil.SafeCopyBytes(b.Blobs[i])
}
return &v1.BlobsBundle{
KzgCommitments: commitments,
Proofs: proofs,
Blobs: blobs,
}, nil
}
// ToProto returns ExecutionPayloadDeneb Proto and BlobsBundle Proto separately.
func (r *ExecPayloadResponseDeneb) ToProto() (*v1.ExecutionPayloadDeneb, *v1.BlobsBundle, error) {
if r.Data == nil {
return nil, nil, errors.New("data field in response is empty")
}
payload, err := r.Data.ExecutionPayload.ToProto()
if err != nil {
return nil, nil, err
}
bundle, err := r.Data.BlobsBundle.ToProto()
if err != nil {
return nil, nil, err
}
return payload, bundle, nil
}
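A hedged decoding sketch for the response types above: unmarshal a raw /eth/v1/builder/blinded_blocks body for Deneb and split it into the payload and blobs bundle protos. The lowercase "deneb" check mirrors the test fixtures in this diff.
func exampleDecodeDenebResponse(raw []byte) (*v1.ExecutionPayloadDeneb, *v1.BlobsBundle, error) {
    resp := &ExecPayloadResponseDeneb{}
    if err := json.Unmarshal(raw, resp); err != nil {
        return nil, nil, errors.Wrap(err, "malformed deneb payload response")
    }
    if resp.Version != "deneb" {
        return nil, nil, fmt.Errorf("unexpected version %q", resp.Version)
    }
    return resp.ToProto()
}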
// ToProto returns the ExecutionPayloadDeneb Proto.
func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
}
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
for i, w := range p.Withdrawals {
withdrawals[i] = &v1.Withdrawal{
Index: w.Index.Uint64(),
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
Address: bytesutil.SafeCopyBytes(w.Address),
Amount: w.Amount.Uint64(),
}
}
return &v1.ExecutionPayloadDeneb{
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
Transactions: txs,
Withdrawals: withdrawals,
DataGasUsed: uint64(p.DataGasUsed),
ExcessDataGas: uint64(p.ExcessDataGas),
}, nil
}
// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
Code int `json:"code"`
Message string `json:"message"`

View File

@@ -14,6 +14,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/proto"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -116,6 +117,47 @@ var testExampleHeaderResponseCapella = `{
}
}`
var testExampleHeaderResponseDeneb = `{
"version": "deneb",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"data_gas_used": "1",
"excess_data_gas": "1"
},
"blinded_blobs_bundle": {
"commitments": [
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
],
"proofs": [
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
],
"blob_roots": [
"0x24564723180fcb3d994104538d351c8dcbde12d541676bb736cf678018ca4739"
]
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
var testExampleHeaderResponseUnknownVersion = `{
"version": "bad",
"data": {
@@ -518,6 +560,51 @@ var testExampleExecutionPayloadCapella = `{
}
}`
var testExampleExecutionPayloadDeneb = fmt.Sprintf(`{
"version": "deneb",
"data": {
"execution_payload":{
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions": [
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
],
"withdrawals": [
{
"index": "1",
"validator_index": "1",
"address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
"amount": "1"
}
],
"data_gas_used": "2",
"excess_data_gas": "3"
},
"blobs_bundle": {
"commitments": [
"0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f"
],
"proofs": [
"0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a"
],
"blobs": [
"%s"
]
}
}
}`, hexutil.Encode(make([]byte, fieldparams.BlobLength)))
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
epr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
@@ -689,6 +776,107 @@ func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
assert.Equal(t, uint64(1), w.Amount.Uint64())
}
func TestExecutionPayloadResponseDenebUnmarshal(t *testing.T) {
epr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
cases := []struct {
expected string
actual string
name string
}{
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ParentHash),
name: "ExecPayloadResponse.ExecutionPayload.ParentHash",
},
{
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
actual: hexutil.Encode(epr.Data.ExecutionPayload.FeeRecipient),
name: "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.StateRoot),
name: "ExecPayloadResponse.ExecutionPayload.StateRoot",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ReceiptsRoot),
name: "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
},
{
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
actual: hexutil.Encode(epr.Data.ExecutionPayload.LogsBloom),
name: "ExecPayloadResponse.ExecutionPayload.LogsBloom",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.PrevRandao),
name: "ExecPayloadResponse.ExecutionPayload.PrevRandao",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BlockNumber),
name: "ExecPayloadResponse.ExecutionPayload.BlockNumber",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasLimit),
name: "ExecPayloadResponse.ExecutionPayload.GasLimit",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.GasUsed),
name: "ExecPayloadResponse.ExecutionPayload.GasUsed",
},
{
expected: "1",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.Timestamp),
name: "ExecPayloadResponse.ExecutionPayload.Timestamp",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.ExtraData),
name: "ExecPayloadResponse.ExecutionPayload.ExtraData",
},
{
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.BaseFeePerGas),
name: "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
},
{
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
actual: hexutil.Encode(epr.Data.ExecutionPayload.BlockHash),
name: "ExecPayloadResponse.ExecutionPayload.BlockHash",
},
{
expected: "2",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.DataGasUsed),
name: "ExecPayloadResponse.ExecutionPayload.DataGasUsed",
},
{
expected: "3",
actual: fmt.Sprintf("%d", epr.Data.ExecutionPayload.ExcessDataGas),
name: "ExecPayloadResponse.ExecutionPayload.ExcessDataGas",
},
}
for _, c := range cases {
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
}
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
require.Equal(t, txHash, hexutil.Encode(epr.Data.ExecutionPayload.Transactions[0]))
require.Equal(t, 1, len(epr.Data.ExecutionPayload.Withdrawals))
w := epr.Data.ExecutionPayload.Withdrawals[0]
assert.Equal(t, uint64(1), w.Index.Uint64())
assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
assert.Equal(t, uint64(1), w.Amount.Uint64())
assert.Equal(t, uint64(2), uint64(epr.Data.ExecutionPayload.DataGasUsed))
assert.Equal(t, uint64(3), uint64(epr.Data.ExecutionPayload.ExcessDataGas))
}
func TestExecutionPayloadResponseToProto(t *testing.T) {
hr := &ExecPayloadResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))
@@ -796,6 +984,85 @@ func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
}
func TestExecutionPayloadResponseDenebToProto(t *testing.T) {
hr := &ExecPayloadResponseDeneb{}
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), hr))
p, blobsBundle, err := hr.ToProto()
require.NoError(t, err)
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
require.NoError(t, err)
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
require.NoError(t, err)
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
require.NoError(t, err)
tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
require.NoError(t, err)
txList := [][]byte{tx}
address, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943")
require.NoError(t, err)
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
expected := &v1.ExecutionPayloadDeneb{
ParentHash: parentHash,
FeeRecipient: feeRecipient,
StateRoot: stateRoot,
ReceiptsRoot: receiptsRoot,
LogsBloom: logsBloom,
PrevRandao: prevRandao,
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: extraData,
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: blockHash,
Transactions: txList,
Withdrawals: []*v1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: address,
Amount: 1,
},
},
DataGasUsed: 2,
ExcessDataGas: 3,
}
require.DeepEqual(t, expected, p)
commitment, err := hexutil.Decode("0x8dab030c51e16e84be9caab84ee3d0b8bbec1db4a0e4de76439da8424d9b957370a10a78851f97e4b54d2ce1ab0d686f")
require.NoError(t, err)
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
require.NoError(t, err)
expectedBlobs := &v1.BlobsBundle{
KzgCommitments: [][]byte{
commitment,
},
Proofs: [][]byte{
proof,
},
Blobs: [][]byte{
make([]byte, fieldparams.BlobLength),
},
}
require.DeepEqual(t, blobsBundle, expectedBlobs)
}
func pbEth1Data() *eth.Eth1Data {
return &eth.Eth1Data{
DepositRoot: make([]byte, 32),
@@ -1026,6 +1293,30 @@ func pbExecutionPayloadHeaderCapella(t *testing.T) *v1.ExecutionPayloadHeaderCap
}
}
func pbExecutionPayloadHeaderDeneb(t *testing.T) *v1.ExecutionPayloadHeaderDeneb {
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
require.NoError(t, err)
return &v1.ExecutionPayloadHeaderDeneb{
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
BaseFeePerGas: bfpg.SSZBytes(),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
DataGasUsed: 1,
ExcessDataGas: 1,
}
}
func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeader{
ExecutionPayloadHeader: pbExecutionPayloadHeader(t),
@@ -1046,6 +1337,16 @@ func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
require.Equal(t, expected, string(b))
}
func TestExecutionPayloadHeaderDeneb_MarshalJSON(t *testing.T) {
h := &ExecutionPayloadHeaderDeneb{
ExecutionPayloadHeaderDeneb: pbExecutionPayloadHeaderDeneb(t),
}
b, err := json.Marshal(h)
require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","data_gas_used":"1","excess_data_gas":"1"}`
require.Equal(t, expected, string(b))
}
var testBuilderBid = `{
"version":"bellatrix",
"data":{

api/client/client.go (new file, 97 lines)
View File

@@ -0,0 +1,97 @@
package client
import (
"context"
"io"
"net"
"net/http"
"net/url"
"github.com/pkg/errors"
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (e.g. WithTimeout).
// `host` is the base host + port used to construct request URLs. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
// Token returns the bearer token used for JWT authentication.
func (c *Client) Token() string {
return c.token
}
// BaseURL returns the base URL of the client.
func (c *Client) BaseURL() *url.URL {
return c.baseURL
}
// Do executes the request using the underlying http client.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
return c.hc.Do(req)
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
return b, nil
}
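A small usage sketch for the generic client defined in this file; the /eth/v1/node/version path is illustrative only.
func exampleGetNodeVersion(ctx context.Context, host string) ([]byte, error) {
    c, err := NewClient(host)
    if err != nil {
        return nil, err
    }
    // Get resolves the path against the base URL and wraps non-200 responses
    // with ErrNotOK (see errors.go).
    return c.Get(ctx, "/eth/v1/node/version")
}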

api/client/client_test.go (new file, 48 lines)
View File

@@ -0,0 +1,48 @@
package client
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}
func TestWithAuthenticationToken(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500", WithAuthenticationToken("my token"))
require.NoError(t, err)
require.Equal(t, cl.Token(), "my token")
}
func TestBaseURL(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500")
require.NoError(t, err)
require.Equal(t, "www.offchainlabs.com", cl.BaseURL().Hostname())
require.Equal(t, "3500", cl.BaseURL().Port())
}

api/client/errors.go (new file, 40 lines)

@@ -0,0 +1,40 @@
package client
import (
"fmt"
"io"
"net/http"
"github.com/pkg/errors"
)
// ErrMalformedHostname is used to indicate that a hostname's format is incorrect.
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// Non200Err parses an HTTP response with a non-200 status code and returns a formatted error wrapping ErrNotOK.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
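
Because any non-200 response from Get produces an error wrapping ErrNotOK (with 404 additionally wrapping ErrNotFound), callers can branch on the sentinel errors. A short sketch assuming the client from client.go above and an arbitrary path; standard-library errors.Is works here since github.com/pkg/errors supports unwrapping:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	cl, err := client.NewClient("localhost:3500")
	if err != nil {
		log.Fatal(err)
	}
	_, err = cl.Get(context.Background(), "/eth/v1/does/not/exist") // illustrative path
	switch {
	case errors.Is(err, client.ErrNotFound):
		fmt.Println("endpoint not found on this node")
	case errors.Is(err, client.ErrNotOK):
		fmt.Println("node returned a non-200 response")
	case err != nil:
		log.Fatal(err) // transport-level failure
	}
}
```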

api/client/options.go (new file, 48 lines)

@@ -0,0 +1,48 @@
package client
import (
"fmt"
"net/http"
"time"
)
// ReqOption is a request functional option.
type ReqOption func(*http.Request)
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
func WithSSZEncoding() ReqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// WithAuthorizationToken is a request functional option that adds an Authorization header carrying the given bearer token.
func WithAuthorizationToken(token string) ReqOption {
return func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// WithRoundTripper replaces the underlying HTTP client's transport with a custom one.
func WithRoundTripper(t http.RoundTripper) ClientOpt {
return func(c *Client) {
c.hc.Transport = t
}
}
// WithAuthenticationToken sets an oauth token to be used.
func WithAuthenticationToken(token string) ClientOpt {
return func(c *Client) {
c.token = token
}
}
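
The two option kinds compose as follows: ClientOpt values configure the client once at construction, while ReqOption values decorate an individual request. A sketch (the token value and request path are placeholders):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	// Client-level options: a timeout on the wrapped http.Client and a stored auth token.
	cl, err := client.NewClient("localhost:3500",
		client.WithTimeout(10*time.Second),
		client.WithAuthenticationToken("placeholder-token"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Request-level options: ask for SSZ and attach the stored token as a bearer header.
	_, err = cl.Get(context.Background(), "/eth/v2/debug/beacon/states/head",
		client.WithSSZEncoding(),
		client.WithAuthorizationToken(cl.Token()),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```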

View File

@@ -0,0 +1,13 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/validator",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,121 @@
package validator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)
const (
localKeysPath = "/eth/v1/keystores"
remoteKeysPath = "/eth/v1/remotekeys"
feeRecipientPath = "/eth/v1/validator/{pubkey}/feerecipient"
)
// Client provides a collection of helper methods for calling the Keymanager API endpoints.
type Client struct {
*client.Client
}
// NewClient returns a new Client that includes functions for REST calls to keymanager APIs.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
return &Client{c}, nil
}
// GetValidatorPubKeys gets the current list of local and web3signer (remote) validator public keys in hex format.
func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
jsonlocal, err := c.GetLocalValidatorKeys(ctx)
if err != nil {
return nil, err
}
jsonremote, err := c.GetRemoteValidatorKeys(ctx)
if err != nil {
return nil, err
}
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}
hexKeys := make(map[string]bool)
for index := range jsonlocal.Keystores {
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
}
for index := range jsonremote.Keystores {
hexKeys[jsonremote.Keystores[index].Pubkey] = true
}
keys := make([]string, 0)
for k := range hexKeys {
keys = append(keys, k)
}
return keys, nil
}
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
return nil, errors.Wrap(err, "failed to parse local keystore list")
}
return jsonlocal, nil
}
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
return nil, err
}
}
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
if len(remoteBytes) != 0 {
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
return nil, errors.Wrap(err, "failed to parse remote keystore list")
}
}
return jsonremote, nil
}
// GetFeeRecipientAddresses takes a list of validators in hex format and returns an equal length list of fee recipients in hex format.
func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []string) ([]string, error) {
feeRecipients := make([]string, len(validators))
for index, validator := range validators {
feejson, err := c.GetFeeRecipientAddress(ctx, validator)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("keymanager API failed to retrieve fee recipient for validator %s", validators[index]))
}
if feejson.Data == nil {
continue
}
feeRecipients[index] = feejson.Data.Ethaddress
}
return feeRecipients, nil
}
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
if err := json.Unmarshal(b, feejson); err != nil {
return nil, errors.Wrap(err, "failed to parse fee recipient")
}
return feejson, nil
}
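
A usage sketch of the keymanager client, pairing each returned public key with its fee recipient (the host, port, and token are placeholders; the keymanager API normally requires the validator client's auth token):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/validator"
)

func main() {
	cl, err := validator.NewClient("localhost:7500", client.WithAuthenticationToken("placeholder-token"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	pubkeys, err := cl.GetValidatorPubKeys(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// GetFeeRecipientAddresses returns a slice aligned with the input keys.
	recipients, err := cl.GetFeeRecipientAddresses(ctx, pubkeys)
	if err != nil {
		log.Fatal(err)
	}
	for i, pk := range pubkeys {
		fmt.Printf("%s -> %s\n", pk, recipients[i])
	}
}
```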

View File

@@ -144,6 +144,7 @@ func (f *Feed) Send(value interface{}) (nsent int) {
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()

View File

@@ -32,6 +32,8 @@ func TestFeedPanics(t *testing.T) {
f.Send(2)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
// Validate it doesn't deadlock.
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -380,6 +381,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

View File

@@ -25,8 +25,11 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)
// An invalid block is the block that fails state transition based on the core protocol rules.

View File

@@ -207,7 +207,18 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
var lastValidHash []byte
if blk.Version() >= version.Deneb {
_, err = blk.Block().Body().BlobKzgCommitments()
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get blob kzg commitments")
}
// TODO: Convert KZG commitments to versioned hashes and pass them below
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{})
} else {
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, [][32]byte{} /*empty version hashes before Deneb*/)
}
switch err {
case nil:
newPayloadValidNodeCount.Inc()
@@ -305,7 +316,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
case version.Capella, version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
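
For reference on the TODO in notifyNewPayload above: EIP-4844 defines the versioned hash as the SHA-256 of the KZG commitment with the first byte replaced by the version prefix 0x01. A minimal sketch (the helper name is illustrative and not part of this change):

```go
package blobs

import "crypto/sha256"

// kzgCommitmentToVersionedHash sketches the EIP-4844 conversion:
// versioned_hash = VERSIONED_HASH_VERSION_KZG || sha256(commitment)[1:].
func kzgCommitmentToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return h
}
```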

View File

@@ -827,6 +827,42 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
require.Equal(t, 0, len(a))
}
func Test_GetPayloadAttributeDeneb(t *testing.T) {
service, tr := minimalTestService(t, WithProposerIdsCache(cache.NewProposerPayloadIDsCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
require.Equal(t, primitives.ValidatorIndex(0), vId)
// Cache hit, advance state, no fee recipient
suggestedVid := primitives.ValidatorIndex(1)
slot := primitives.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
a, err := attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []primitives.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())

View File

@@ -53,7 +53,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
@@ -157,7 +157,11 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
if headState == nil || headState.IsNil() {
return nil, errors.New("nil state")
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slot)
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, err
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, slot)
if err != nil {
return nil, err
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -15,7 +16,7 @@ import (
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
// Current period
@@ -38,7 +39,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -66,7 +67,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
@@ -81,7 +82,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())

View File

@@ -53,7 +53,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -18,7 +19,17 @@ import (
)
// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
@@ -32,7 +43,36 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
if cachedState != nil && !cachedState.IsNil() {
return cachedState, nil
}
// Try the next slot cache for early-epoch calls; this should mostly have been covered already,
// but the check is cheap
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute epoch start")
}
cachedState = transition.NextSlotState(c.Root, slot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState.Slot() != slot {
cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
}
if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return cachedState, nil
}
// Otherwise, do not process attestations for old, non-viable checkpoints
ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
if err != nil {
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
}
if !ok {
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
}
// Fallback to state regeneration.
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)

View File

@@ -27,11 +27,20 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -42,7 +51,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
blkWithValidState.Block.Slot = 32
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
@@ -57,6 +66,10 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))
service.head = &head{
state: st,
}
tests := []struct {
name string
a *ethpb.Attestation
@@ -67,11 +80,6 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -160,6 +168,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
@@ -167,8 +178,17 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
s2, err := service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s2, err = service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
s1, err = service.getAttPreState(ctx, cp1)
@@ -187,6 +207,10 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
@@ -195,11 +219,18 @@ func TestStore_SaveCheckpointState(t *testing.T) {
func TestStore_UpdateCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
epoch := primitives.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -209,8 +240,16 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
epoch = 2
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
blk = util.NewBeaconBlock()
blk.Block.Slot = 64
r2, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)
@@ -289,3 +328,22 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestGetAttPreState_HeadState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
require.NoError(t, transition.UpdateNextSlotCache(ctx, checkpoint.Root, baseState))
_, err = service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
st, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().SlotsPerEpoch, st.Slot())
}

View File

@@ -285,7 +285,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
@@ -483,14 +483,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, copied.Slot()+1)
if err != nil {
return err
}
@@ -651,26 +651,24 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
func (s *Service) spawnLateBlockTasksLoop() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
func (s *Service) runLateBlockTasks() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("runLateBlockTasks encountered an error waiting for initialization")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}()
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
@@ -685,12 +683,26 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
Type: statefeed.MissedSlot,
})
headRoot := s.headRoot()
headState := s.headState(ctx)
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
@@ -698,8 +710,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
@@ -709,11 +719,4 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}

View File

@@ -636,7 +636,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s))
require.NoError(t, service.handleEpochBoundary(ctx, s, []byte{}))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
}
@@ -1875,9 +1875,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}

View File

@@ -26,7 +26,7 @@ const reorgLateBlockCountAttestations = 2 * time.Second
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error)
}
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
@@ -37,7 +37,7 @@ type AttestationReceiver interface {
}
// AttestationTargetState returns the pre state of attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)
if err != nil {
return nil, err
@@ -45,6 +45,9 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
// We acquire the lock here instead of in getAttPreState because that function gets called from UpdateHead, which holds a write lock
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.getAttPreState(ctx, target)
}

View File

@@ -26,6 +26,7 @@ type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock, blkRoots [][32]byte) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -150,6 +151,11 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
return s.hasBlockInInitSyncOrDB(ctx, root)
}
// RecentBlockSlot returns the block slot from the fork choice store
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
return s.cfg.ForkChoiceStore.Slot(root)
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.cfg.ForkChoiceStore.Lock()

View File

@@ -130,7 +130,7 @@ func (s *Service) Start() {
}
}
s.spawnProcessAttestationsRoutine()
s.spawnLateBlockTasksLoop()
go s.runLateBlockTasks()
}
// Stop the blockchain service's main event loop and associated goroutines.

View File

@@ -52,6 +52,11 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
return nil
}
func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

View File

@@ -69,6 +69,7 @@ type ChainService struct {
OptimisticCheckRootReceived [32]byte
FinalizedRoots map[[32]byte]bool
OptimisticRoots map[[32]byte]bool
BlockSlot primitives.Slot
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -320,7 +321,7 @@ func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestatio
}
// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
return s.State, nil
}
@@ -389,6 +390,11 @@ func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
return s.InitSyncBlockRoots[rt]
}
// RecentBlockSlot mocks the same method in the chain service.
func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
return s.BlockSlot, nil
}
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}

View File

@@ -15,10 +15,12 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/db:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -2,6 +2,7 @@ package builder
import (
"context"
"fmt"
"reflect"
"time"
@@ -10,10 +11,12 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -24,7 +27,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error)
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -87,7 +90,7 @@ func (s *Service) Stop() error {
}
// SubmitBlindedBlock submits a blinded block to the builder relay network.
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blobs []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlock")
defer span.End()
start := time.Now()
@@ -95,10 +98,13 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
if s.c == nil {
return nil, ErrNoBuilder
return nil, nil, ErrNoBuilder
}
if uint64(len(blobs)) > fieldparams.MaxBlobsPerBlock {
return nil, nil, fmt.Errorf("blob count %d beyond max limit of %d", len(blobs), fieldparams.MaxBlobsPerBlock)
}
return s.c.SubmitBlindedBlock(ctx, b)
return s.c.SubmitBlindedBlock(ctx, b, blobs)
}
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.

View File

@@ -3,6 +3,7 @@ package builder
import (
"context"
"testing"
"time"
buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
@@ -38,6 +39,21 @@ func Test_RegisterValidator(t *testing.T) {
assert.Equal(t, true, builder.RegisteredVals[pubkey])
}
func Test_RegisterValidator_WithCache(t *testing.T) {
ctx := context.Background()
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
require.NoError(t, err)
pubkey := bytesutil.ToBytes48([]byte("pubkey"))
var feeRecipient [20]byte
reg := &eth.ValidatorRegistrationV1{Pubkey: pubkey[:], Timestamp: uint64(time.Now().UTC().Unix()), FeeRecipient: feeRecipient[:]}
require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: reg}}))
registration, err := s.registrationCache.RegistrationByIndex(0)
require.NoError(t, err)
require.DeepEqual(t, reg, registration)
}
func Test_BuilderMethodsWithouClient(t *testing.T) {
s, err := NewService(context.Background())
require.NoError(t, err)
@@ -46,7 +62,7 @@ func Test_BuilderMethodsWithouClient(t *testing.T) {
_, err = s.GetHeader(context.Background(), 0, [32]byte{}, [48]byte{})
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
_, err = s.SubmitBlindedBlock(context.Background(), nil)
_, _, err = s.SubmitBlindedBlock(context.Background(), nil, nil)
assert.ErrorContains(t, ErrNoBuilder.Error(), err)
err = s.RegisterValidator(context.Background(), nil)

View File

@@ -26,9 +26,12 @@ type MockBuilderService struct {
HasConfigured bool
Payload *v1.ExecutionPayload
PayloadCapella *v1.ExecutionPayloadCapella
PayloadDeneb *v1.ExecutionPayloadDeneb
BlobBundle *v1.BlobsBundle
ErrSubmitBlindedBlock error
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
@@ -41,23 +44,34 @@ func (s *MockBuilderService) Configured() bool {
}
// SubmitBlindedBlock for mocking.
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock, _ []*ethpb.SignedBlindedBlobSidecar) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
if s.Payload != nil {
w, err := blocks.WrappedExecutionPayload(s.Payload)
if err != nil {
return nil, errors.Wrap(err, "could not wrap payload")
return nil, nil, errors.Wrap(err, "could not wrap payload")
}
return w, s.ErrSubmitBlindedBlock
return w, nil, s.ErrSubmitBlindedBlock
}
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if s.PayloadCapella != nil {
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, 0)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
}
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, 0)
if err != nil {
return nil, errors.Wrap(err, "could not wrap capella payload")
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}
return w, s.ErrSubmitBlindedBlock
return w, s.BlobBundle, s.ErrSubmitBlindedBlock
}
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch || s.BidDeneb != nil {
return builder.WrappedSignedBuilderBidDeneb(s.BidDeneb)
}
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
}

View File

@@ -34,6 +34,7 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
@@ -87,7 +88,6 @@ go_test(
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4))
// Mimick finalized deposit trie fetch.
// Mimic finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1

View File

@@ -4,35 +4,41 @@ import (
"bytes"
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)
const keyLength = 40
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
// The key is the concatenation of the slot and the block root.
// The value is the concatenation of the proposer and payload IDs, 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[[40]byte][vpIdsLength]byte
slotToProposerAndPayloadIDs map[[keyLength]byte][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[[40]byte][vpIdsLength]byte),
slotToProposerAndPayloadIDs: make(map[[keyLength]byte][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r [32]byte) (primitives.ValidatorIndex, [8]byte, bool) {
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot and head root to build the block.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
slot primitives.Slot,
r [fieldparams.RootLength]byte,
) (primitives.ValidatorIndex, [pIdLength]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[idKey(slot, r)]
if !ok {
return 0, [8]byte{}, false
return 0, [pIdLength]byte{}, false
}
vId := ids[:vIdLength]
@@ -43,8 +49,13 @@ func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r
return primitives.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot, vId primitives.ValidatorIndex, pId [8]byte, r [32]byte) {
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot and head root to build block.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
slot primitives.Slot,
vId primitives.ValidatorIndex,
pId [pIdLength]byte,
r [fieldparams.RootLength]byte,
) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
@@ -63,7 +74,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot,
}
}
// PrunePayloadIDs removes the payload id entries that's current than input slot.
// PrunePayloadIDs removes the payload ID entries older than input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
f.Lock()
defer f.Unlock()
@@ -76,8 +87,8 @@ func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
}
}
func idKey(slot primitives.Slot, r [32]byte) [40]byte {
var k [40]byte
func idKey(slot primitives.Slot, r [fieldparams.RootLength]byte) [keyLength]byte {
var k [keyLength]byte
copy(k[:], append(bytesutil.Uint64ToBytesBigEndian(uint64(slot)), r[:]...))
return k
}
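
With the key now covering both slot and head root, the same slot can hold entries for different candidate heads; a short usage sketch (all values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

func main() {
	c := cache.NewProposerPayloadIDsCache()
	slot := primitives.Slot(100)
	root := [32]byte{0xaa}
	c.SetProposerAndPayloadIDs(slot, primitives.ValidatorIndex(7), [8]byte{1}, root)

	if vIdx, payloadID, ok := c.GetProposerPayloadIDs(slot, root); ok {
		fmt.Println(vIdx, payloadID) // hit: entry stored under (slot, root)
	}
	if _, _, ok := c.GetProposerPayloadIDs(slot, [32]byte{0xbb}); !ok {
		fmt.Println("miss: same slot, different head root")
	}
}
```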

View File

@@ -3,15 +3,11 @@ package cache
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -38,33 +34,10 @@ func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIn
regCache.lock.RUnlock()
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
isExpired, err := RegistrationTimeStampExpired(v.Timestamp)
if err != nil {
return nil, errors.Wrapf(err, "failed to check registration expiration")
}
if isExpired {
regCache.lock.RUnlock()
regCache.lock.Lock()
defer regCache.lock.Unlock()
delete(regCache.indexToRegistration, id)
log.Warnf("registration for validator index %d expired at unix time %d", id, v.Timestamp)
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
regCache.lock.RUnlock()
return v, nil
}
func RegistrationTimeStampExpired(ts uint64) (bool, error) {
// safely convert unint64 to int64
i, err := math.Int(ts)
if err != nil {
return false, err
}
expiryDuration := params.BeaconConfig().RegistrationDuration
// registered time + expiration duration < current time = expired
return time.Unix(int64(i), 0).Add(expiryDuration).Before(time.Now()), nil
}
// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
_, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")

View File

@@ -6,15 +6,12 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestRegistrationCache(t *testing.T) {
hook := logTest.NewGlobal()
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex := primitives.ValidatorIndex(1)
@@ -31,29 +28,14 @@ func TestRegistrationCache(t *testing.T) {
reg, err := cache.RegistrationByIndex(validatorIndex)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
t.Run("Registration expired", func(t *testing.T) {
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
_, err := cache.RegistrationByIndex(validatorIndex2)
require.ErrorContains(t, "no validator registered", err)
require.LogsContain(t, hook, "expired")
})
t.Run("Registration close to expiration still passes", func(t *testing.T) {
t.Run("successfully updates", func(t *testing.T) {
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
@@ -62,21 +44,3 @@ func TestRegistrationCache(t *testing.T) {
require.Equal(t, string(reg.Pubkey), string(pubkey))
})
}
func Test_RegistrationTimeStampExpired(t *testing.T) {
// expiration set at 3 epochs
t.Run("expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, true, isExpired)
})
t.Run("is not expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs -5 seconds
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, false, isExpired)
})
}

View File

@@ -145,6 +145,40 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
},
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateDeneb:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyDeneb{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}

View File

@@ -61,7 +61,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
return false, nil
case err != nil:
return false, err

View File

@@ -0,0 +1,30 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["upgrade.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["upgrade_test.go"],
deps = [
":go_default_library",
"//beacon-chain/core/time:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
],
)

View File

@@ -0,0 +1,115 @@
package deneb
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// UpgradeToDeneb upgrades the given generic beacon state to a Deneb beacon state.
func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := state.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := state.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := state.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := state.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := state.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := state.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := state.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := state.HistoricalSummaries()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateDeneb{
GenesisTime: state.GenesisTime(),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: [][]byte{},
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
ExcessDataGas: 0,
DataGasUsed: 0,
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
}
return state_native.InitializeFromProtoUnsafeDeneb(s)
}

View File

@@ -0,0 +1,94 @@
package deneb_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestUpgradeToDeneb(t *testing.T) {
st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState := st.Copy()
mSt, err := deneb.UpgradeToDeneb(st)
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
numValidators := mSt.NumValidators()
p, err := mSt.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
p, err = mSt.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
s, err := mSt.InactivityScores()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
f := mSt.Fork()
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().DenebForkVersion,
Epoch: time.CurrentEpoch(st),
}, f)
csc, err := mSt.CurrentSyncCommittee()
require.NoError(t, err)
psc, err := preForkState.CurrentSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, csc)
nsc, err := mSt.NextSyncCommittee()
require.NoError(t, err)
psc, err = preForkState.NextSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, nsc)
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
require.Equal(t, true, ok)
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
require.NoError(t, err)
txRoot, err := prevHeader.TransactionsRoot()
require.NoError(t, err)
wdRoot, err := prevHeader.WithdrawalsRoot()
require.NoError(t, err)
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: prevHeader.ParentHash(),
FeeRecipient: prevHeader.FeeRecipient(),
StateRoot: prevHeader.StateRoot(),
ReceiptsRoot: prevHeader.ReceiptsRoot(),
LogsBloom: prevHeader.LogsBloom(),
PrevRandao: prevHeader.PrevRandao(),
BlockNumber: prevHeader.BlockNumber(),
GasLimit: prevHeader.GasLimit(),
GasUsed: prevHeader.GasUsed(),
Timestamp: prevHeader.Timestamp(),
BaseFeePerGas: prevHeader.BaseFeePerGas(),
BlockHash: prevHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
}
require.DeepEqual(t, wanted, protoHeader)
}

View File

@@ -68,7 +68,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
@@ -66,25 +65,6 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
//
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
for i := 0; i < len(sigs); i++ {
sigs[i], err = bls.SignatureFromBytes(attestations[i].Signature)
if err != nil {
return nil, err
}
}
return bls.AggregateSignatures(sigs), nil
}
// IsAggregated returns true if the attestation is an aggregated attestation,
// false otherwise.
func IsAggregated(attestation *ethpb.Attestation) bool {
@@ -183,11 +163,11 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooEarlyCount.Inc()
attReceivedTooLateCount.Inc()
return attError
}
if attTime.After(upperBounds) {
attReceivedTooLateCount.Inc()
attReceivedTooEarlyCount.Inc()
return attError
}
return nil

View File

@@ -10,8 +10,6 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -45,44 +43,6 @@ func TestAttestation_IsAggregator(t *testing.T) {
})
}
func TestAttestation_AggregateSignature(t *testing.T) {
t.Run("verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts)
require.NoError(t, err)
assert.Equal(t, true, aggSig.FastAggregateVerify(pubkeys, msg), "Signature did not verify")
})
t.Run("not verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg)
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
require.NoError(t, err)
assert.Equal(t, false, aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)), "Signature not suppose to verify")
})
}
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
// Create 10 committees
committeeCount := uint64(10)

View File

@@ -81,6 +81,15 @@ func CanUpgradeToCapella(slot primitives.Slot) bool {
return epochStart && capellaEpoch
}
// CanUpgradeToDeneb returns true if the input `slot` can upgrade to Deneb.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == DENEB_FORK_EPOCH
func CanUpgradeToDeneb(slot primitives.Slot) bool {
epochStart := slots.IsEpochStart(slot)
denebEpoch := slots.ToEpoch(slot) == params.BeaconConfig().DenebForkEpoch
return epochStart && denebEpoch
}
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

View File

@@ -298,3 +298,38 @@ func TestCanUpgradeToCapella(t *testing.T) {
})
}
}
func TestCanUpgradeToDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.DenebForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not deneb epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "deneb epoch",
slot: primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToDeneb(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToDeneb() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -18,6 +18,7 @@ go_library(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/capella:go_default_library",
"//beacon-chain/core/deneb:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/execution:go_default_library",

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/capella"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/deneb"
e "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/execution"
@@ -269,28 +270,10 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
return nil, errors.Wrap(err, "failed to increment state slot")
}
if time.CanUpgradeToAltair(state.Slot()) {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToBellatrix(state.Slot()) {
state, err = execution.UpgradeToBellatrix(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToCapella(state.Slot()) {
state, err = capella.UpgradeToCapella(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
state, err = UpgradeState(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "failed to upgrade state")
}
}
@@ -301,6 +284,45 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
return state, nil
}
// UpgradeState upgrades the state to the next version if possible.
func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.UpgradeState")
defer span.End()
var err error
if time.CanUpgradeToAltair(state.Slot()) {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToBellatrix(state.Slot()) {
state, err = execution.UpgradeToBellatrix(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToCapella(state.Slot()) {
state, err = capella.UpgradeToCapella(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
if time.CanUpgradeToDeneb(state.Slot()) {
state, err = deneb.UpgradeToDeneb(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
return state, nil
}
// VerifyOperationLengths verifies that block operation lengths are valid.
func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
if err := blocks.BeaconBlockIsNil(b); err != nil {

View File

@@ -256,7 +256,7 @@ func ProcessOperationsNoVerifyAttsSigs(
if err != nil {
return nil, err
}
case version.Altair, version.Bellatrix, version.Capella:
case version.Altair, version.Bellatrix, version.Capella, version.Deneb:
state, err = altairOperations(ctx, state, signedBeaconBlock)
if err != nil {
return nil, err

View File

@@ -629,6 +629,20 @@ func TestProcessSlots_ThroughBellatrixEpoch(t *testing.T) {
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlots_ThroughDenebEpoch(t *testing.T) {
transition.SkipSlotCache.Disable()
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf.DenebForkEpoch = 5
params.OverrideBeaconConfig(conf)
st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
require.NoError(t, err)
require.Equal(t, version.Deneb, st.Version())
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
r := []byte{'a'}

View File

@@ -150,7 +150,7 @@ func TestSlashValidator_OK(t *testing.T) {
maxBalance := params.BeaconConfig().MaxEffectiveBalance
slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isnt the expected amount")
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")
whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
bal, err := state.BalanceAtIndex(proposer)

View File

@@ -54,6 +54,11 @@ type ReadOnlyDatabase interface {
// Fee recipients operations.
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// Blob operations.
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
@@ -89,6 +94,10 @@ type NoHeadAccessDatabase interface {
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
// Blob operations.
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"archived_point.go",
"backup.go",
"blob.go",
"blocks.go",
"checkpoint.go",
"deposit_contract.go",
@@ -38,6 +39,7 @@ go_library(
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -74,6 +76,7 @@ go_test(
srcs = [
"archived_point_test.go",
"backup_test.go",
"blob_test.go",
"blocks_test.go",
"checkpoint_test.go",
"deposit_contract_test.go",
@@ -110,6 +113,7 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",
"//testing/assertions:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",

beacon-chain/db/kv/blob.go (new file)
View File

@@ -0,0 +1,253 @@
package kv
import (
"bytes"
"context"
"fmt"
"sort"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots) where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
defer span.End()
sortSideCars(scs)
if err := s.verifySideCars(scs); err != nil {
return err
}
slot := scs[0].Slot
return s.db.Update(func(tx *bolt.Tx) error {
encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
if err != nil {
return err
}
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
newKey := blobSidecarKey(scs[0])
rotatingBufferPrefix := newKey[0:8]
var replacingKey []byte
for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
if len(k) != 0 {
replacingKey = k
oldSlotBytes := replacingKey[8:16]
oldSlot := bytesutil.BytesToSlotBigEndian(oldSlotBytes)
if oldSlot >= slot {
return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", slot, oldSlot)
}
break
}
}
// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
// store the blob by key and exit early.
if len(replacingKey) != 0 {
if err := bkt.Delete(replacingKey); err != nil {
log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
}
}
return bkt.Put(newKey, encodedBlobSidecar)
})
}
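The overwrite rule in step 3 of the comment above is easiest to see with two slots that land on the same rotating-buffer position. A toy, self-contained sketch of that decision (illustrative only; plain maps stand in for the bolt bucket, and the constant is a mainnet-style assumption):

package main

import "fmt"

func main() {
	const maxSlots = 131072 // assumed rotating-buffer size (4096 epochs * 32 slots)

	// savedSlot tracks the newest slot stored at each rotating-buffer position.
	savedSlot := map[uint64]uint64{}

	save := func(slot uint64) error {
		pos := slot % maxSlots
		if old, ok := savedSlot[pos]; ok && old >= slot {
			return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", slot, old)
		}
		savedSlot[pos] = slot // overwrite whatever was stored at this position before
		return nil
	}

	fmt.Println(save(100))            // <nil>: position empty, stored
	fmt.Println(save(100))            // error: same position, incoming slot is not newer
	fmt.Println(save(100 + maxSlots)) // <nil>: same position, newer slot overwrites the old entry
}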
// verifySideCars ensures that all sidecars have the same slot, parent root, block root, and proposer index.
// It also ensures that indices are sequential and start at 0 and no more than MAX_BLOB_EPOCHS.
func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
if len(scs) == 0 {
return errors.New("nil or empty blob sidecars")
}
if uint64(len(scs)) > fieldparams.MaxBlobsPerBlock {
return fmt.Errorf("too many sidecars: %d > %d", len(scs), fieldparams.MaxBlobsPerBlock)
}
sl := scs[0].Slot
pr := scs[0].BlockParentRoot
r := scs[0].BlockRoot
p := scs[0].ProposerIndex
for i, sc := range scs {
if sc.Slot != sl {
return fmt.Errorf("sidecar slot mismatch: %d != %d", sc.Slot, sl)
}
if !bytes.Equal(sc.BlockParentRoot, pr) {
return fmt.Errorf("sidecar parent root mismatch: %x != %x", sc.BlockParentRoot, pr)
}
if !bytes.Equal(sc.BlockRoot, r) {
return fmt.Errorf("sidecar root mismatch: %x != %x", sc.BlockRoot, r)
}
if sc.ProposerIndex != p {
return fmt.Errorf("sidecar proposer index mismatch: %d != %d", sc.ProposerIndex, p)
}
if sc.Index != uint64(i) {
return fmt.Errorf("sidecar index mismatch: %d != %d", sc.Index, i)
}
}
return nil
}
// sortSideCars sorts the sidecars by their index.
func sortSideCars(scs []*ethpb.BlobSidecar) {
sort.Slice(scs, func(i, j int) bool {
return scs[i].Index < scs[j].Index
})
}
// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That is the most entries a node will keep before rotating them out.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
defer span.End()
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.First(); k != nil; k, v = c.Next() {
if bytes.HasSuffix(k, root[:]) {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.BlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}
return filterForIndices(sc, indices...)
}
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
if len(indices) == 0 {
return sc.Sidecars, nil
}
// This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
// in ascending order, e.g. 0..3, without gaps. This allows us to assume the indices argument
// maps 1:1 with indices in the BlobSidecars storage object.
maxIdx := uint64(len(sc.Sidecars)) - 1
sidecars := make([]*ethpb.BlobSidecar, len(indices))
for i, idx := range indices {
if idx > maxIdx {
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
}
sidecars[i] = sc.Sidecars[idx]
}
return sidecars, nil
}
// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That is the most entries a node will keep before rotating them out.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
defer span.End()
var enc []byte
sk := slotKey(slot)
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.Seek(sk); bytes.HasPrefix(k, sk); k, v = c.Next() {
slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
if slotInKey == slot {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.BlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}
return filterForIndices(sc, indices...)
}
// DeleteBlobSidecar removes all blob sidecars stored under the given beacon block root.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
if err := bkt.Delete(k); err != nil {
return err
}
}
}
return nil
})
}
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
key := slotKey(blob.Slot)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
key = append(key, blob.BlockRoot...)
return key
}
func slotKey(slot types.Slot) []byte {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
}
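The 131072-entry bound quoted in the BlobSidecarsByRoot and BlobSidecarsBySlot comments falls out of this key scheme: assuming mainnet-style values of MinEpochsForBlobsSidecarsRequest = 4096 and SlotsPerEpoch = 32, the rotating buffer has 4096 * 32 = 131072 possible prefixes, which is presumably also why checkEpochsForBlobSidecarsRequestBucket below pins the configured value in the database. A minimal standalone sketch of the key layout (illustrative only, hypothetical slot value):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const slotsPerEpoch = 32              // assumed mainnet value
	const minEpochsForBlobsRequest = 4096 // assumed mainnet value
	const maxSlotsToPersist = slotsPerEpoch * minEpochsForBlobsRequest // 131072 rotating positions

	slot := uint64(200000) // hypothetical blob slot
	rotated := slot % maxSlotsToPersist

	// Key layout: bytes(slot % maxSlots) ++ bytes(slot) ++ block_root (32 bytes).
	key := make([]byte, 48)
	binary.BigEndian.PutUint64(key[0:8], rotated) // rotating-buffer prefix scanned by SaveBlobSidecar
	binary.BigEndian.PutUint64(key[8:16], slot)   // actual slot, read back as oldSlotBytes
	// key[16:48] would hold the 32-byte beacon block root.

	fmt.Printf("rotated slot %d, key length %d bytes\n", rotated, len(key))
	// Output: rotated slot 68928, key length 48 bytes
}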
func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(epochsForBlobSidecarsRequestBucket)
k := []byte("epoch-key")
v := b.Get(k)
if v == nil {
if err := b.Put(k, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
return err
}
return nil
}
e := bytesutil.BytesToUint64BigEndian(v)
if e != uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest) {
return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
}
return nil
}); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,317 @@
package kv
import (
"context"
"crypto/rand"
"fmt"
"testing"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
"github.com/prysmaticlabs/prysm/v4/testing/require"
bolt "go.etcd.io/bbolt"
)
func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
if len(expect) != len(got) {
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
}
for i := 0; i < len(expect); i++ {
es := expect[i]
gs := got[i]
var e string
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
if e != "" {
return errors.New(e)
}
}
return nil
}
func TestStore_BlobSidecars(t *testing.T) {
ctx := context.Background()
t.Run("empty", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 0)
require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
})
t.Run("empty by root", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("empty by slot", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsBySlot(ctx, 1)
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by root (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by slot (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by slot (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("delete works", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("saving a blob with older slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.ErrorContains(t, "but already have older blob with slot", db.SaveBlobSidecar(ctx, scs))
})
t.Run("saving a new blob for rotation", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, int(fieldparams.MaxBlobsPerBlock), len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
newScs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range newScs {
sc.Slot = sc.Slot + newRetentionSlot
}
require.NoError(t, db.SaveBlobSidecar(ctx, newScs))
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(newScs, got))
})
}
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
blobSidecars := make([]*ethpb.BlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateBlobSidecar(t, i)
}
return blobSidecars
}
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
blob := make([]byte, 131072)
_, err := rand.Read(blob)
require.NoError(t, err)
kzgCommitment := make([]byte, 48)
_, err = rand.Read(kzgCommitment)
require.NoError(t, err)
kzgProof := make([]byte, 48)
_, err = rand.Read(kzgProof)
require.NoError(t, err)
return &ethpb.BlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
Index: index,
Slot: 100,
BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
ProposerIndex: 101,
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
}
}
func TestStore_verifySideCars(t *testing.T) {
s := setupDB(t)
tests := []struct {
name string
scs []*ethpb.BlobSidecar
error string
}{
{name: "empty", scs: []*ethpb.BlobSidecar{}, error: "nil or empty blob sidecars"},
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), error: "too many sidecars: 7 > 6"},
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, error: "sidecar slot mismatch: 2 != 1"},
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
{name: "invalid side index", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 0}}, error: "sidecar index mismatch: 0 != 1"},
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := s.verifySideCars(tt.scs)
if tt.error != "" {
require.Equal(t, tt.error, err.Error())
} else {
require.NoError(t, err)
}
})
}
}
func TestStore_sortSidecars(t *testing.T) {
scs := []*ethpb.BlobSidecar{
{Index: 6},
{Index: 4},
{Index: 2},
{Index: 1},
{Index: 3},
{Index: 5},
{},
}
sortSideCars(scs)
for i := 0; i < len(scs)-1; i++ {
require.Equal(t, uint64(i), scs[i].Index)
}
}
func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
s := setupDB(b)
ctx := context.Background()
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
}))
err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
for i := 1; i < 131071; i++ {
r := make([]byte, 32)
_, err := rand.Read(r)
require.NoError(b, err)
scs := []*ethpb.BlobSidecar{
{BlockRoot: r, Slot: primitives.Slot(i)},
}
k := blobSidecarKey(scs[0])
encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
require.NoError(b, err)
require.NoError(b, bkt.Put(k, encodedBlobSidecar))
}
return nil
})
require.NoError(b, err)
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
}))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.BlobSidecarsByRoot(ctx, [32]byte{'b'})
require.NoError(b, err)
}
}
func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
dbStore := setupDB(t)
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check
nConfig := params.BeaconNetworkConfig()
nConfig.MinEpochsForBlobsSidecarsRequest = 42069
params.OverrideBeaconNetworkConfig(nConfig)
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
}

View File

@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
}
case hasDenebKey(enc):
rawBlock = &ethpb.SignedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Deneb block")
}
case hasDenebBlindKey(enc):
rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -854,6 +864,8 @@ func marshalBlockFull(
return nil, err
}
switch blk.Version() {
case version.Deneb:
return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
case version.Bellatrix:
@@ -888,6 +900,8 @@ func marshalBlockBlinded(
return nil, errors.Wrap(err, "could not marshal blinded block")
}
switch blk.Version() {
case version.Deneb:
return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
case version.Capella:
return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
case version.Bellatrix:

View File

@@ -90,6 +90,40 @@ var blockTests = []struct {
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "deneb",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBeaconBlockDeneb()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
b.Block.Body.BlobKzgCommitments = [][]byte{
bytesutil.PadTo([]byte{0x01}, 48),
bytesutil.PadTo([]byte{0x02}, 48),
bytesutil.PadTo([]byte{0x03}, 48),
bytesutil.PadTo([]byte{0x04}, 48),
}
}
return blocks.NewSignedBeaconBlock(b)
},
},
{
name: "deneb blind",
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
b := util.NewBlindedBeaconBlockDeneb()
b.Block.Slot = slot
if root != nil {
b.Block.ParentRoot = root
b.Block.Body.BlobKzgCommitments = [][]byte{
bytesutil.PadTo([]byte{0x05}, 48),
bytesutil.PadTo([]byte{0x06}, 48),
bytesutil.PadTo([]byte{0x07}, 48),
bytesutil.PadTo([]byte{0x08}, 48),
}
}
return blocks.NewSignedBeaconBlock(b)
},
},
}
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
@@ -359,6 +393,10 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
if _, err := blk.PbDenebBlock(); err == nil {
wanted, err = blk.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
retrievedPb, err := retrievedBlock.Proto()
@@ -543,7 +581,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 150, len(retrieved))
for _, b := range retrieved {
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpect block slot %d", b.Block().Slot())
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpected block slot %d", b.Block().Slot())
}
})
}
@@ -582,6 +620,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbDenebBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
bPb, err := b.Proto()
@@ -604,6 +646,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
if _, err := block2.PbDenebBlock(); err == nil {
wanted2, err = block2.ToBlinded()
require.NoError(t, err)
}
wanted2Pb, err := wanted2.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -626,6 +672,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
if _, err := block3.PbDenebBlock(); err == nil {
wanted, err = wanted.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -666,6 +716,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
if _, err := block1.PbDenebBlock(); err == nil {
wanted, err = block1.ToBlinded()
require.NoError(t, err)
}
wantedPb, err := wanted.Proto()
require.NoError(t, err)
bPb, err := b.Proto()
@@ -687,6 +741,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
if _, err := genesisBlock.PbDenebBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -708,6 +766,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
if _, err := genesisBlock.PbDenebBlock(); err == nil {
wanted, err = genesisBlock.ToBlinded()
require.NoError(t, err)
}
wantedPb, err = wanted.Proto()
require.NoError(t, err)
bPb, err = b.Proto()
@@ -808,6 +870,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
if _, err := b1.PbDenebBlock(); err == nil {
wanted, err = b1.ToBlinded()
require.NoError(t, err)
}
retrieved0Pb, err := retrievedBlocks[0].Proto()
require.NoError(t, err)
wantedPb, err := wanted.Proto()
@@ -828,6 +894,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
if _, err := b2.PbDenebBlock(); err == nil {
wanted, err = b2.ToBlinded()
require.NoError(t, err)
}
retrieved0Pb, err = retrievedBlocks[0].Proto()
require.NoError(t, err)
wantedPb, err = wanted.Proto()
@@ -842,6 +912,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}
if _, err := b3.PbDenebBlock(); err == nil {
wanted, err = b3.ToBlinded()
require.NoError(t, err)
}
retrieved1Pb, err := retrievedBlocks[1].Proto()
require.NoError(t, err)
wantedPb, err = wanted.Proto()

View File

@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}
func hasDenebKey(enc []byte) bool {
if len(denebKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebKey)], denebKey)
}
func hasDenebBlindKey(enc []byte) bool {
if len(denebBlindKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}
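These prefix checks pair with the marshal paths above (marshalBlockFull / marshalBlockBlinded): the SSZ bytes are stored with the fork key prepended and snappy-compressed, and on read the fork is detected by comparing the decompressed prefix before unmarshaling. A small self-contained sketch of that round trip (illustrative only; it uses github.com/golang/snappy as the surrounding code does, and the three-byte payload is a stand-in for real SSZ):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	denebKey := []byte("deneb")
	ssz := []byte{0x01, 0x02, 0x03} // stand-in for a marshaled SignedBeaconBlockDeneb

	// Write path: fork key ++ SSZ bytes, snappy-compressed for storage.
	stored := snappy.Encode(nil, append(denebKey, ssz...))

	// Read path: decompress, then detect the fork by prefix before unmarshaling.
	dec, err := snappy.Decode(nil, stored)
	if err != nil {
		panic(err)
	}
	isDeneb := len(dec) > len(denebKey) && bytes.Equal(dec[:len(denebKey)], denebKey)
	fmt.Println(isDeneb, dec[len(denebKey):]) // true [1 2 3]
}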

View File

@@ -129,6 +129,9 @@ var Buckets = [][]byte{
feeRecipientBucket,
registrationBucket,
blobsBucket,
epochsForBlobSidecarsRequestBucket,
}
// NewKVStore initializes a new boltDB key-value store at the directory
@@ -199,6 +202,11 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
if err := kv.setupBlockStorageType(ctx); err != nil {
return nil, err
}
if err := checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket")
}
return kv, nil
}

View File

@@ -130,9 +130,15 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
return err
}
item := enc
if hasAltairKey(item) {
switch {
case hasAltairKey(enc):
item = item[len(altairKey):]
case hasBellatrixKey(enc):
item = item[len(bellatrixKey):]
case hasCapellaKey(enc):
item = item[len(capellaKey):]
}
detector, err := detect.FromState(item)
if err != nil {
return err
@@ -165,9 +171,14 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
return err
}
var stateBytes []byte
if hasAltairKey(enc) {
switch {
case hasAltairKey(enc):
stateBytes = snappy.Encode(nil, append(altairKey, rawObj...))
} else {
case hasBellatrixKey(enc):
stateBytes = snappy.Encode(nil, append(bellatrixKey, rawObj...))
case hasCapellaKey(enc):
stateBytes = snappy.Encode(nil, append(capellaKey, rawObj...))
default:
stateBytes = snappy.Encode(nil, rawObj)
}
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {

View File

@@ -313,3 +313,217 @@ func Test_migrateAltairStateValidators(t *testing.T) {
})
}
}
func Test_migrateBellatrixStateValidators(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
}{
{
name: "migrates validators and adds them to new buckets",
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
assert.NoError(t, err)
_, err = tx.CreateBucketIfNotExists(blockRootValidatorHashesBucket)
assert.NoError(t, err)
return nil
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// check whether the new buckets are present
err := dbStore.db.View(func(tx *bbolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
assert.NotNil(t, valBkt)
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
assert.NotNil(t, idxBkt)
return nil
})
assert.NoError(t, err)
// check if the migration worked
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
// find hashes of the validators that are set as part of the state
var hashes []byte
var individualHashes [][]byte
for _, val := range vals {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
hashes = append(hashes, hash[:]...)
individualHashes = append(individualHashes, hash[:])
}
// check if all the validators that were in the state, are stored properly in the validator bucket
pbState, err := state_native.ProtobufBeaconStateBellatrix(rcvdState.ToProtoUnsafe())
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
found := false
for _, h := range individualHashes {
if bytes.Equal(hash[:], h) {
found = true
}
}
require.Equal(t, true, found)
validatorsFoundCount++
}
require.Equal(t, len(vals), validatorsFoundCount)
// check if the state validator indexes are stored properly
err = dbStore.db.View(func(tx *bbolt.Tx) error {
rcvdValhashBytes := tx.Bucket(blockRootValidatorHashesBucket).Get(blockRoot[:])
rcvdValHashes, sErr := snappy.Decode(nil, rcvdValhashBytes)
assert.NoError(t, sErr)
require.DeepEqual(t, hashes, rcvdValHashes)
return nil
})
assert.NoError(t, err)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dbStore := setupDB(t)
// add a state with the given validators
vals := validators(10)
blockRoot := [32]byte{'A'}
st, _ := util.DeterministicGenesisStateBellatrix(t, 20)
err := st.SetFork(&v1alpha1.Fork{
PreviousVersion: params.BeaconConfig().AltairForkVersion,
CurrentVersion: params.BeaconConfig().BellatrixForkVersion,
Epoch: 0,
})
require.NoError(t, err)
assert.NoError(t, st.SetSlot(100))
assert.NoError(t, st.SetValidators(vals))
assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot))
// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()
tt.setup(t, dbStore, st, vals)
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
tt.eval(t, dbStore, st, vals)
})
}
}
func Test_migrateCapellaStateValidators(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
}{
{
name: "migrates validators and adds them to new buckets",
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
assert.NoError(t, err)
_, err = tx.CreateBucketIfNotExists(blockRootValidatorHashesBucket)
assert.NoError(t, err)
return nil
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// check whether the new buckets are present
err := dbStore.db.View(func(tx *bbolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
assert.NotNil(t, valBkt)
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
assert.NotNil(t, idxBkt)
return nil
})
assert.NoError(t, err)
// check if the migration worked
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
// find hashes of the validators that are set as part of the state
var hashes []byte
var individualHashes [][]byte
for _, val := range vals {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
hashes = append(hashes, hash[:]...)
individualHashes = append(individualHashes, hash[:])
}
// check if all the validators that were in the state, are stored properly in the validator bucket
pbState, err := state_native.ProtobufBeaconStateCapella(rcvdState.ToProtoUnsafe())
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
found := false
for _, h := range individualHashes {
if bytes.Equal(hash[:], h) {
found = true
}
}
require.Equal(t, true, found)
validatorsFoundCount++
}
require.Equal(t, len(vals), validatorsFoundCount)
// check if the state validator indexes are stored properly
err = dbStore.db.View(func(tx *bbolt.Tx) error {
rcvdValhashBytes := tx.Bucket(blockRootValidatorHashesBucket).Get(blockRoot[:])
rcvdValHashes, sErr := snappy.Decode(nil, rcvdValhashBytes)
assert.NoError(t, sErr)
require.DeepEqual(t, hashes, rcvdValHashes)
return nil
})
assert.NoError(t, err)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dbStore := setupDB(t)
// add a state with the given validators
vals := validators(10)
blockRoot := [32]byte{'A'}
st, _ := util.DeterministicGenesisStateCapella(t, 20)
err := st.SetFork(&v1alpha1.Fork{
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: 0,
})
require.NoError(t, err)
assert.NoError(t, st.SetSlot(100))
assert.NoError(t, st.SetValidators(vals))
assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot))
// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()
tt.setup(t, dbStore, st, vals)
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateArchivedIndex(tx) error")
tt.eval(t, dbStore, st, vals)
})
}
}

View File

@@ -46,6 +46,7 @@ var (
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
blobsBucket = []byte("blobs")
// The keys below are used to identify objects that need to be fork compatible.
// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -55,6 +56,9 @@ var (
capellaKey = []byte("capella")
capellaBlindKey = []byte("blind-capella")
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
denebKey = []byte("deneb")
denebBlindKey = []byte("blind-deneb")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
@@ -70,4 +74,7 @@ var (
// Migrations
migrationsBucket = []byte("migrations")
// Stores how long to keep the blob sidecars for.
epochsForBlobSidecarsRequestBucket = []byte("epochs-for-blob-sidecars-request")
)

View File

@@ -229,34 +229,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
pbState, err := statenative.ProtobufBeaconStatePhase0(rawType)
pbState, err := getPhase0PbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
encodedState, err := encode(ctx, pbState)
if err != nil {
return err
}
pbState.Validators = valEntries
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateAltair:
pbState, err := statenative.ProtobufBeaconStateAltair(rawType)
pbState, err := getAltairPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -272,13 +266,10 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateBellatrix:
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawType)
pbState, err := getBellatrixPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
@@ -294,12 +285,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return err
}
case *ethpb.BeaconStateCapella:
pbState, err := statenative.ProtobufBeaconStateCapella(rawType)
pbState, err := getCapellaPbState(rawType)
if err != nil {
return err
}
if pbState == nil {
return errors.New("nil state")
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
rawObj, err := pbState.MarshalSSZ()
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(capellaKey, rawObj...))
if err := bucket.Put(rt[:], encodedState); err != nil {
return err
}
pbState.Validators = valEntries
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
return err
}
case *ethpb.BeaconStateDeneb:
pbState, err := getDenebPbState(rawType)
if err != nil {
return err
}
valEntries := pbState.Validators
pbState.Validators = make([]*ethpb.Validator, 0)
@@ -323,6 +330,61 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
}
func getPhase0PbState(rawState interface{}) (*ethpb.BeaconState, error) {
pbState, err := statenative.ProtobufBeaconStatePhase0(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getAltairPbState(rawState interface{}) (*ethpb.BeaconStateAltair, error) {
pbState, err := statenative.ProtobufBeaconStateAltair(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getBellatrixPbState(rawState interface{}) (*ethpb.BeaconStateBellatrix, error) {
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getCapellaPbState(rawState interface{}) (*ethpb.BeaconStateCapella, error) {
pbState, err := statenative.ProtobufBeaconStateCapella(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func getDenebPbState(rawState interface{}) (*ethpb.BeaconStateDeneb, error) {
pbState, err := statenative.ProtobufBeaconStateDeneb(rawState)
if err != nil {
return nil, err
}
if pbState == nil {
return nil, errors.New("nil state")
}
return pbState, nil
}
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
valBkt := tx.Bucket(stateValidatorsBucket)
for hashStr, validatorEntry := range validatorsEntries {
@@ -472,6 +534,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
}
switch {
case hasDenebKey(enc):
protoState := &ethpb.BeaconStateDeneb{}
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return statenative.InitializeFromProtoUnsafeDeneb(protoState)
case hasCapellaKey(enc):
// Marshal state bytes to capella beacon state.
protoState := &ethpb.BeaconStateCapella{}
@@ -579,6 +654,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case *ethpb.BeaconStateDeneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
}
if rState == nil {
return nil, errors.New("nil state")
}
rawObj, err := rState.MarshalSSZ()
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
}
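
A note on the encoding pattern above: marshalState prepends a fork-specific marker (here denebKey) to the SSZ bytes before snappy compression, and unmarshalState checks that prefix to pick the right protobuf type. The following is a minimal, self-contained sketch of that round trip under assumed names; it is not the Prysm implementation.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

// Assumed marker, mirroring the fork-compatibility keys declared above.
var denebKey = []byte("deneb")

// encodeWithForkKey tags raw SSZ bytes with the fork marker and compresses them.
func encodeWithForkKey(key, ssz []byte) []byte {
	return snappy.Encode(nil, append(key, ssz...))
}

// decodeForkKeyed decompresses the payload, reports whether it carries the marker,
// and returns the SSZ bytes with the marker stripped.
func decodeForkKeyed(key, enc []byte) ([]byte, bool, error) {
	raw, err := snappy.Decode(nil, enc)
	if err != nil {
		return nil, false, err
	}
	if !bytes.HasPrefix(raw, key) {
		return nil, false, nil
	}
	return raw[len(key):], true, nil
}

func main() {
	enc := encodeWithForkKey(denebKey, []byte{0x01, 0x02})
	ssz, ok, err := decodeForkKeyed(denebKey, enc)
	fmt.Println(ssz, ok, err) // [1 2] true <nil>
}
```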

View File

@@ -900,6 +900,100 @@ func TestBellatrixState_CanDelete(t *testing.T) {
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestDenebState_CanSaveRetrieve(t *testing.T) {
db := setupDB(t)
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())
savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestDenebState_CanDelete(t *testing.T) {
db := setupDB(t)
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
require.NoError(t, st.SetSlot(100))
require.NoError(t, db.SaveState(context.Background(), st, r))
require.Equal(t, true, db.HasState(context.Background(), r))
require.NoError(t, db.DeleteState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
}
func TestStateDeneb_CanSaveRetrieveValidatorEntries(t *testing.T) {
db := setupDB(t)
// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()
r := [32]byte{'A'}
require.Equal(t, false, db.HasState(context.Background(), r))
stateValidators := validators(10)
st, _ := util.DeterministicGenesisStateDeneb(t, 20)
require.NoError(t, st.SetSlot(100))
require.NoError(t, st.SetValidators(stateValidators))
ctx := context.Background()
require.NoError(t, db.SaveState(ctx, st, r))
assert.Equal(t, true, db.HasState(context.Background(), r))
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching")
// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
data := idxBkt.Get(r[:])
require.NotEqual(t, 0, len(data))
return nil
})
require.NoError(t, err)
// check if all the validator entries are still intact in the validator entry bucket.
err = db.db.Update(func(tx *bolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
// if any of the original validator entry is not present, then fail the test.
for _, val := range stateValidators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
data := valBkt.Get(hash[:])
require.NotNil(t, data)
require.NotEqual(t, 0, len(data))
}
return nil
})
require.NoError(t, err)
}
func BenchmarkState_CheckStateSaveTime_1(b *testing.B) { checkStateSaveTime(b, 1) }
func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b, 10) }

View File

@@ -14,7 +14,6 @@ go_library(
"metrics.go",
"options.go",
"prometheus.go",
"provider.go",
"rpc_connection.go",
"service.go",
],
@@ -49,7 +48,6 @@ go_library(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/clientstats:go_default_library",
"//monitoring/tracing:go_default_library",
"//network:go_default_library",
@@ -90,7 +88,6 @@ go_test(
"init_test.go",
"log_processing_test.go",
"prometheus_test.go",
"provider_test.go",
"service_test.go",
],
data = glob(["testdata/**"]),
@@ -122,7 +119,6 @@ go_test(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/clientstats:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",

View File

@@ -23,7 +23,6 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -50,6 +49,7 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// NewPayloadMethodV2 v2 request string for JSON-RPC.
NewPayloadMethodV2 = "engine_newPayloadV2"
NewPayloadMethodV3 = "engine_newPayloadV3"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
@@ -58,6 +58,7 @@ const (
GetPayloadMethod = "engine_getPayloadV1"
// GetPayloadMethodV2 v2 request string for JSON-RPC.
GetPayloadMethodV2 = "engine_getPayloadV2"
GetPayloadMethodV3 = "engine_getPayloadV3"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
ExchangeTransitionConfigurationMethod = "engine_exchangeTransitionConfigurationV1"
// ExecutionBlockByHashMethod request string for JSON-RPC.
@@ -96,11 +97,11 @@ type ExecutionPayloadReconstructor interface {
// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error)
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error)
ExchangeTransitionConfiguration(
ctx context.Context, cfg *pb.TransitionConfiguration,
) error
@@ -111,7 +112,7 @@ type EngineCaller interface {
var EmptyBlockHash = errors.New("Block hash is empty 0x0000...")
// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error) {
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes [][32]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
@@ -143,6 +144,15 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadDeneb:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadDeneb)
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes)
if err != nil {
return nil, handleRPCError(err)
}
default:
return nil, errors.New("unknown execution data type")
}
@@ -190,7 +200,7 @@ func (s *Service) ForkchoiceUpdated(
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Capella:
case version.Capella, version.Deneb:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
@@ -220,7 +230,8 @@ func (s *Service) ForkchoiceUpdated(
}
// GetPayload calls the engine_getPayloadVX method via JSON-RPC.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, error) {
// It returns the execution data as well as the blobs bundle.
func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetPayload")
defer span.End()
start := time.Now()
@@ -232,23 +243,42 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch {
result := &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV3, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, nil, handleRPCError(err)
}
ed, err := blocks.WrappedExecutionPayloadDeneb(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, err
}
return ed, result.BlobsBundle, nil
}
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
result := &pb.ExecutionPayloadCapellaWithValue{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethodV2, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
return nil, nil, handleRPCError(err)
}
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(result.Value))
return blocks.WrappedExecutionPayloadCapella(result.Payload, math.WeiToGwei(v))
ed, err := blocks.WrappedExecutionPayloadCapella(result.Payload, blocks.PayloadValueToGwei(result.Value))
if err != nil {
return nil, nil, err
}
return ed, nil, nil
}
result := &pb.ExecutionPayload{}
err := s.rpcClient.CallContext(ctx, result, GetPayloadMethod, pb.PayloadIDBytes(payloadId))
if err != nil {
return nil, handleRPCError(err)
return nil, nil, handleRPCError(err)
}
return blocks.WrappedExecutionPayload(result)
ed, err := blocks.WrappedExecutionPayload(result)
if err != nil {
return nil, nil, err
}
return ed, nil, nil
}
// ExchangeTransitionConfiguration calls the engine_exchangeTransitionConfigurationV1 method via JSON-RPC.
@@ -684,7 +714,8 @@ func fullPayloadFromExecutionBlock(
txs[i] = txBin
}
if block.Version == version.Bellatrix {
switch block.Version {
case version.Bellatrix:
return blocks.WrappedExecutionPayload(&pb.ExecutionPayload{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
@@ -701,24 +732,51 @@ func fullPayloadFromExecutionBlock(
BlockHash: blockHash[:],
Transactions: txs,
})
case version.Capella:
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
edg, err := header.ExcessDataGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessDataGas attribute from execution payload header")
}
return blocks.WrappedExecutionPayloadDeneb(
&pb.ExecutionPayloadDeneb{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
ExcessDataGas: edg,
}, 0) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version %d", block.Version)
}
return blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: blockHash[:],
Transactions: txs,
Withdrawals: block.Withdrawals,
}, 0) // We can't get the block value and don't care about the block value for this instance
}
// Handles errors received from the RPC server according to the specification.
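
NewPayload now carries the blob versioned hashes required by engine_newPayloadV3, and the Deneb branch of GetPayload returns the matching BlobsBundle. For context, the Deneb spec derives each versioned hash by hashing the KZG commitment with SHA-256 and overwriting the first byte with the version byte 0x01. The sketch below illustrates that derivation; the helper name is assumed and the code is not part of this diff.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// versionedHashVersionKZG is the Deneb KZG versioned-hash version byte.
const versionedHashVersionKZG = byte(0x01)

// kzgCommitmentToVersionedHash mirrors the spec's kzg_commitment_to_versioned_hash:
// sha256(commitment) with the leading byte replaced by the version byte.
func kzgCommitmentToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = versionedHashVersionKZG
	return h
}

func main() {
	// Placeholder 48-byte commitments standing in for a BlobsBundle's KzgCommitments.
	commitments := [][]byte{make([]byte, 48), make([]byte, 48)}
	hashes := make([][32]byte, len(commitments))
	for i, c := range commitments {
		hashes[i] = kzgCommitmentToVersionedHash(c)
	}
	fmt.Printf("%d versioned hashes ready to pass to NewPayload\n", len(hashes))
}
```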

View File

@@ -75,7 +75,7 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayload"].(*pb.ExecutionPayload)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := srv.GetPayload(ctx, payloadId, 1)
resp, _, err := srv.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
resPb, err := resp.PbBellatrix()
require.NoError(t, err)
@@ -85,7 +85,7 @@ func TestClient_IPC(t *testing.T) {
want, ok := fix["ExecutionPayloadCapellaWithValue"].(*pb.ExecutionPayloadCapellaWithValue)
require.Equal(t, true, ok)
payloadId := [8]byte{1}
resp, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, err := srv.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
resPb, err := resp.PbCapella()
require.NoError(t, err)
@@ -118,7 +118,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayload(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -129,7 +129,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req, 0)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -163,6 +163,7 @@ func TestClient_HTTP(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
params.OverrideBeaconConfig(cfg)
t.Run(GetPayloadMethod, func(t *testing.T) {
@@ -203,7 +204,7 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, 1)
resp, _, err := client.GetPayload(ctx, payloadId, 1)
require.NoError(t, err)
pb, err := resp.PbBellatrix()
require.NoError(t, err)
@@ -247,7 +248,7 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
resp, _, err := client.GetPayload(ctx, payloadId, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
pb, err := resp.PbCapella()
require.NoError(t, err)
@@ -262,6 +263,60 @@ func TestClient_HTTP(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1236), v)
})
t.Run(GetPayloadMethodV3, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayloadDenebWithValue"].(*pb.GetPayloadV3ResponseJson)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
require.NoError(t, err)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": want,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
defer srv.Close()
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
defer rpcClient.Close()
client := &Service{}
client.rpcClient = rpcClient
// We call the RPC method via HTTP and expect a proper result.
resp, blobsBundle, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
g, err := resp.ExcessDataGas()
require.NoError(t, err)
require.DeepEqual(t, uint64(3), g)
g, err = resp.DataGasUsed()
require.NoError(t, err)
require.DeepEqual(t, uint64(2), g)
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, blobsBundle.KzgCommitments)
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, blobsBundle.Proofs)
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, blobsBundle.Blobs)
})
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
@@ -412,7 +467,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -426,7 +481,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -440,7 +509,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -454,7 +523,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" SYNCING status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -468,7 +551,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -482,7 +565,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV3+" INVALID_BLOCK_HASH status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -496,7 +593,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -510,7 +607,21 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV3+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV3Setup(t, want, execPayload)
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload, 0)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -524,7 +635,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload)
resp, err := client.NewPayload(ctx, wrappedPayload, [][32]byte{})
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -1306,6 +1417,25 @@ func fixtures() map[string]interface{} {
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
}
executionPayloadFixtureDeneb := &pb.ExecutionPayloadDeneb{
ParentHash: foo[:],
FeeRecipient: bar,
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
PrevRandao: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
DataGasUsed: 2,
ExcessDataGas: 3,
}
hexUint := hexutil.Uint64(1)
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadCapellaJSON{
@@ -1326,6 +1456,34 @@ func fixtures() map[string]interface{} {
},
BlockValue: "0x11fffffffff",
}
dgu := hexutil.Uint64(2)
edg := hexutil.Uint64(3)
executionPayloadWithValueFixtureDeneb := &pb.GetPayloadV3ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
DataGasUsed: &dgu,
ExcessDataGas: &edg,
},
BlockValue: "0x11fffffffff",
BlobsBundle: &pb.BlobBundleJSON{
Commitments: [][48]byte{bytesutil.ToBytes48([]byte("commitment1")), bytesutil.ToBytes48([]byte("commitment2"))},
Proofs: [][48]byte{bytesutil.ToBytes48([]byte("proof1")), bytesutil.ToBytes48([]byte("proof2"))},
Blobs: [][]byte{{'a'}, {'b'}},
},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
@@ -1421,7 +1579,9 @@ func fixtures() map[string]interface{} {
"ExecutionBlock": executionBlock,
"ExecutionPayload": executionPayloadFixture,
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
"ExecutionPayloadDeneb": executionPayloadFixtureDeneb,
"ExecutionPayloadCapellaWithValue": executionPayloadWithValueFixtureCapella,
"ExecutionPayloadDenebWithValue": executionPayloadWithValueFixtureDeneb,
"ValidPayloadStatus": validStatus,
"InvalidBlockHashStatus": inValidBlockHashStatus,
"AcceptedStatus": acceptedStatus,
@@ -1496,6 +1656,126 @@ func Test_fullPayloadFromExecutionBlock(t *testing.T) {
}
}
func Test_fullPayloadFromExecutionBlockCapella(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderCapella
block *pb.ExecutionBlock
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: common.BytesToHash([]byte("bar")),
},
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderCapella{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Capella,
Hash: wantedHash,
},
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadCapella(&pb.ExecutionPayloadCapella{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderCapella(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}
func Test_fullPayloadFromExecutionBlockDeneb(t *testing.T) {
type args struct {
header *pb.ExecutionPayloadHeaderDeneb
block *pb.ExecutionBlock
}
wantedHash := common.BytesToHash([]byte("foo"))
tests := []struct {
name string
args args
want func() interfaces.ExecutionData
err string
}{
{
name: "block hash field in header and block hash mismatch",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: []byte("foo"),
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: common.BytesToHash([]byte("bar")),
},
},
err: "does not match execution block hash",
},
{
name: "ok",
args: args{
header: &pb.ExecutionPayloadHeaderDeneb{
BlockHash: wantedHash[:],
},
block: &pb.ExecutionBlock{
Version: version.Deneb,
Hash: wantedHash,
},
},
want: func() interfaces.ExecutionData {
p, err := blocks.WrappedExecutionPayloadDeneb(&pb.ExecutionPayloadDeneb{
BlockHash: wantedHash[:],
Transactions: [][]byte{},
}, 0)
require.NoError(t, err)
return p
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
wrapped, err := blocks.WrappedExecutionPayloadHeaderDeneb(tt.args.header, 0)
require.NoError(t, err)
got, err := fullPayloadFromExecutionBlock(wrapped, tt.args.block)
if err != nil {
assert.ErrorContains(t, tt.err, err)
} else {
assert.DeepEqual(t, tt.want(), got)
}
})
}
}
func TestHeaderByHash_NotFound(t *testing.T) {
srv := &Service{}
srv.rpcClient = RPCClientBad{}
@@ -1819,6 +2099,40 @@ func newPayloadV2Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
return service
}
func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadDeneb) *Service {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
reqArg, err := json.Marshal(payload)
require.NoError(t, err)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": status,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
service := &Service{}
service.rpcClient = rpcClient
return service
}
func TestCapella_PayloadBodiesByHash(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableOptionalEngineMethods: true,

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)
@@ -15,7 +16,7 @@ type Option func(s *Service) error
// WithHttpEndpoint parse http endpoint for the powchain service to use.
func WithHttpEndpoint(endpointString string) Option {
return func(s *Service) error {
s.cfg.currHttpEndpoint = HttpEndpoint(endpointString)
s.cfg.currHttpEndpoint = network.HttpEndpoint(endpointString)
return nil
}
}
@@ -27,7 +28,7 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
return nil
}
// Overwrite authorization type for all endpoints to be of a bearer type.
hEndpoint := HttpEndpoint(endpointString)
hEndpoint := network.HttpEndpoint(endpointString)
hEndpoint.Auth.Method = authorization.Bearer
hEndpoint.Auth.Value = string(secret)

View File

@@ -15,7 +15,7 @@ import (
func TestCleanup(t *testing.T) {
ctx := context.Background()
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
unregistered := pc.unregister()
assert.Equal(t, true, unregistered, "PowchainCollector.unregister did not return true (via prometheus.DefaultRegistry)")
// PowchainCollector is a prometheus.Collector, so we should be able to register it again
@@ -39,7 +39,7 @@ func TestCleanup(t *testing.T) {
func TestCancelation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
ticker := time.NewTicker(10 * time.Second)
cancel()
select {

View File

@@ -1,49 +0,0 @@
package execution
import (
"encoding/base64"
"strings"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)
// HttpEndpoint extracts an httputils.Endpoint from the provider parameter.
func HttpEndpoint(eth1Provider string) network.Endpoint {
endpoint := network.Endpoint{
Url: "",
Auth: network.AuthorizationData{
Method: authorization.None,
Value: "",
}}
authValues := strings.Split(eth1Provider, ",")
endpoint.Url = strings.TrimSpace(authValues[0])
if len(authValues) > 2 {
log.Errorf(
"ETH1 endpoint string can contain one comma for specifying the authorization header to access the provider."+
" String contains too many commas: %d. Skipping authorization.", len(authValues)-1)
} else if len(authValues) == 2 {
switch network.Method(strings.TrimSpace(authValues[1])) {
case authorization.Basic:
basicAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(basicAuthValues) != 2 {
log.Errorf("Basic Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Basic
endpoint.Auth.Value = base64.StdEncoding.EncodeToString([]byte(basicAuthValues[1]))
}
case authorization.Bearer:
bearerAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(bearerAuthValues) != 2 {
log.Errorf("Bearer Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Bearer
endpoint.Auth.Value = bearerAuthValues[1]
}
case authorization.None:
log.Errorf("Authorization has incorrect format or authorization type is not supported.")
}
}
return endpoint
}

View File

@@ -1,74 +0,0 @@
package execution
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestHttpEndpoint(t *testing.T) {
hook := logTest.NewGlobal()
url := "http://test"
t.Run("URL", func(t *testing.T) {
endpoint := HttpEndpoint(url)
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with separator", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(" " + url + " ,")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("Basic auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Basic username:password")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Basic username:password ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Basic username:password foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Bearer auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Bearer token ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Bearer token foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Too many separators", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token,foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
}

View File

@@ -3,7 +3,6 @@ package execution
import (
"context"
"fmt"
"net/url"
"strings"
"time"
@@ -107,26 +106,10 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error)
// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
// Need to handle ipc and http
var client *gethRPC.Client
u, err := url.Parse(endpoint.Url)
client, err := network.NewExecutionRPCClient(ctx, endpoint)
if err != nil {
return nil, err
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
if err != nil {
return nil, err
}
case "", "ipc":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {

View File

@@ -23,6 +23,7 @@ go_library(
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",

View File

@@ -14,6 +14,7 @@ import (
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
pb "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -25,6 +26,7 @@ type EngineClient struct {
ForkChoiceUpdatedResp []byte
ExecutionPayload *pb.ExecutionPayload
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
ExecutionPayloadDeneb *pb.ExecutionPayloadDeneb
ExecutionBlock *pb.ExecutionBlock
Err error
ErrLatestExecBlock error
@@ -39,10 +41,11 @@ type EngineClient struct {
TerminalBlockHashExists bool
OverrideValidHash [32]byte
BlockValue uint64
BlobsBundle *pb.BlobsBundle
}
// NewPayload --
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData) ([]byte, error) {
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ [][32]byte) ([]byte, error) {
return e.NewPayloadResp, e.ErrNewPayload
}
@@ -57,15 +60,26 @@ func (e *EngineClient) ForkchoiceUpdated(
}
// GetPayload --
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, error) {
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (interfaces.ExecutionData, *pb.BlobsBundle, error) {
if slots.ToEpoch(s) >= params.BeaconConfig().DenebForkEpoch {
ed, err := blocks.WrappedExecutionPayloadDeneb(e.ExecutionPayloadDeneb, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, err
}
return ed, e.BlobsBundle, nil
}
if slots.ToEpoch(s) >= params.BeaconConfig().CapellaForkEpoch {
return blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, e.BlockValue)
ed, err := blocks.WrappedExecutionPayloadCapella(e.ExecutionPayloadCapella, math.Gwei(e.BlockValue))
if err != nil {
return nil, nil, err
}
return ed, nil, nil
}
p, err := blocks.WrappedExecutionPayload(e.ExecutionPayload)
if err != nil {
return nil, err
return nil, nil, err
}
return p, e.ErrGetPayload
return p, nil, e.ErrGetPayload
}
// ExchangeTransitionConfiguration --

View File

@@ -228,10 +228,44 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
return n.root, nil
}
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
node, ok := f.store.nodeByRoot[cp.Root]
if !ok || node == nil {
return false, nil
}
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
}
if node.slot > epochStart {
return false, nil
}
if len(node.children) == 0 {
return true, nil
}
if node.slot == epochStart {
return true, nil
}
nodeEpoch := slots.ToEpoch(node.slot)
if nodeEpoch >= cp.Epoch {
return false, nil
}
for _, child := range node.children {
if child.slot > epochStart {
return true, nil
}
}
return false, nil
}
// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
newBalances := f.justifiedBalances
zHash := params.BeaconConfig().ZeroHash
for index, vote := range f.votes {
// Skip if validator has been slashed
@@ -240,7 +274,7 @@ func (f *ForkChoice) updateBalances() error {
}
// Skip if validator has never voted for current root and next root (i.e. if the
// votes are zero hash aka genesis block), there's nothing to compute.
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
if vote.currentRoot == zHash && vote.nextRoot == zHash {
continue
}
@@ -260,7 +294,7 @@ func (f *ForkChoice) updateBalances() error {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
if ok && vote.nextRoot != zHash {
// Protection against nil node
if nextNode == nil {
return errors.Wrap(ErrNilNode, "could not update balances")
@@ -269,7 +303,7 @@ func (f *ForkChoice) updateBalances() error {
}
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
if ok && vote.currentRoot != zHash {
// Protection against nil node
if currentNode == nil {
return errors.Wrap(ErrNilNode, "could not update balances")
@@ -594,3 +628,12 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
return nil
}
// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.slot, nil
}
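
IsViableForCheckpoint and Slot are also surfaced on the forkchoice Getter interface later in this diff. The sketch below shows how a caller might combine them; the types and interface here are local stand-ins for the Prysm ones, used only for illustration.

```go
package forkchoicesketch

import "fmt"

// Local stand-ins for the Prysm types involved; illustrative only.
type Slot uint64
type Epoch uint64
type Checkpoint struct {
	Epoch Epoch
	Root  [32]byte
}

// checkpointReader captures just the two accessors added in this diff.
type checkpointReader interface {
	IsViableForCheckpoint(*Checkpoint) (bool, error)
	Slot([32]byte) (Slot, error)
}

// logCheckpointStatus sketches combining the two accessors for a checkpoint.
func logCheckpointStatus(fc checkpointReader, cp *Checkpoint) {
	viable, err := fc.IsViableForCheckpoint(cp)
	if err != nil {
		fmt.Printf("viability check failed: %v\n", err)
		return
	}
	slot, err := fc.Slot(cp.Root)
	if err != nil {
		// Slot errors when the root is unknown to forkchoice.
		fmt.Printf("root %#x is not in forkchoice\n", cp.Root)
		return
	}
	fmt.Printf("checkpoint root at slot %d, viable for epoch %d: %t\n", slot, cp.Epoch, viable)
}
```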

View File

@@ -754,3 +754,110 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
got := f.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, [32]byte{'A'}, got)
}
func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children
require.NoError(t, f.InsertNode(ctx, st, root))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 0})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 2})
require.NoError(t, err)
require.Equal(t, true, viable)
st, broot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, broot))
// Epoch start
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children but impossible checkpoint
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
st, croot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, broot, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, croot))
// Children in same epoch
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
st, droot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, broot, [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, droot))
// Children in next epoch but boundary
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// Boundary block
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 0})
require.NoError(t, err)
require.Equal(t, false, viable)
// Children in next epoch
st, eroot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, broot, [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, eroot))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
}
func TestForkChoiceSlot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
_, err = f.Slot(root)
require.ErrorIs(t, ErrNilNode, err)
require.NoError(t, f.InsertNode(ctx, st, root))
slot, err := f.Slot(root)
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}

View File

@@ -31,9 +31,7 @@ import (
// store.justified_checkpoint = store.best_justified_checkpoint
func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
// Reset proposer boost root
if err := f.resetBoostedProposerRoot(ctx); err != nil {
return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
}
f.store.proposerBoostRoot = [32]byte{}
// Return if it's not a new epoch.
if !slots.IsEpochStart(slot) {

View File

@@ -1,19 +1,12 @@
package doublylinkedtree
import (
"context"
"fmt"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
)
// resetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
f.store.proposerBoostRoot = [32]byte{}
return nil
}
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes.
func (f *ForkChoice) applyProposerBoostScore() error {

View File

@@ -82,6 +82,11 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
return
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
}
return true
}
@@ -137,6 +142,11 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
return head.root
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return head.root
}
// Only reorg if we are proposing early
secs, err := slots.SecondsSinceSlotStart(head.slot+1, f.store.genesisTime, uint64(time.Now().Unix()))
if err != nil {

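With this change, both ShouldOverrideFCU and GetProposerHead orphan a late head only when the head's attesting weight stays under ReorgWeightThreshold and the parent's LMD weight clears ReorgParentWeightThreshold, each expressed as a percentage of one slot's committee weight. A condensed sketch of the two guards, with illustrative names and the same integer arithmetic:

```go
// shouldOrphanLateHead condenses the two weight guards added above.
// Weights are in Gwei; thresholds are whole-number percentages of the
// committee weight for one slot. Names are illustrative, not Prysm's.
func shouldOrphanLateHead(headWeight, parentWeight, committeeWeight, headThresholdPct, parentThresholdPct uint64) bool {
	// The late head must be weak: its attesting weight stays below the threshold.
	if headWeight*100 > committeeWeight*headThresholdPct {
		return false
	}
	// The parent must be strong: only orphan when its LMD vote clears the threshold.
	if parentWeight*100 < committeeWeight*parentThresholdPct {
		return false
	}
	return true
}
```
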
View File

@@ -22,7 +22,11 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
@@ -80,6 +84,12 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent = saved
})
t.Run("parent is weak", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight
require.Equal(t, false, f.ShouldOverrideFCU())
@@ -101,7 +111,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
driftGenesisTime(f, 3, 1)
childRoot := [32]byte{'b'}
@@ -161,6 +175,12 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
require.Equal(t, childRoot, f.GetProposerHead())
f.store.headNode.parent = saved
})
t.Run("parent is weak", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight
require.Equal(t, childRoot, f.GetProposerHead())

View File

@@ -53,6 +53,7 @@ type Getter interface {
CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, primitives.Slot, error)
IsCanonical(root [32]byte) bool
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
FinalizedPayloadBlockHash() [32]byte
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
@@ -66,6 +67,7 @@ type Getter interface {
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)
ShouldOverrideFCU() bool
Slot([32]byte) (primitives.Slot, error)
}
// Setter allows to set forkchoice information

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/sirupsen/logrus"
)
@@ -29,6 +30,9 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
if blk == nil || blk.Body() == nil {
return
}
if blk.Version() == version.Phase0 {
return
}
bits, err := blk.Body().SyncAggregate()
if err != nil {
log.WithError(err).Error("Could not get SyncAggregate")

View File

@@ -114,19 +114,11 @@ func (s *Service) Start() {
"ValidatorIndices": tracked,
}).Info("Starting service")
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
go s.run(stateChannel, stateSub)
go s.run()
}
// run waits until the beacon is synced and starts the monitoring system.
func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) {
if stateChannel == nil {
log.Error("State state is nil")
return
}
func (s *Service) run() {
if err := s.waitForSync(s.config.InitialSyncComplete); err != nil {
log.WithError(err)
return
@@ -154,6 +146,8 @@ func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription
s.isLogging = true
s.Unlock()
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
s.monitorRoutine(stateChannel, stateSub)
}

View File

@@ -271,11 +271,9 @@ func TestWaitForSyncCanceled(t *testing.T) {
func TestRun(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
go func() {
s.run(stateChannel, stateSub)
s.run()
}()
close(s.config.InitialSyncComplete)

Some files were not shown because too many files have changed in this diff Show More