Compare commits

...

77 Commits

Author SHA1 Message Date
Nishant Das
a75e78ddb4 Ignore Late Message Logs (#12525) 2023-06-14 10:37:39 +00:00
Nishant Das
1862422db9 Remove Defer In ProposeGenericBlock (#12524) 2023-06-14 05:25:52 +00:00
james-prysm
152d21059e adding additional comments and safe copies to protos (#12518) 2023-06-13 10:31:29 -05:00
terencechain
2b410893a0 optimization: epoch boundary uses next slot cache (#12515)
* optimization: epoch boundary uses next slot cache

* test: fix

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-12 17:13:49 +00:00
Potuz
826267310e benchmark cold hashing of capella beaconstates (#12516)
* benchmark cold hashing of capella beaconstates

* use since
2023-06-12 16:54:43 +00:00
Nishant Das
d5057cfb42 Add the Ability for Prysm To Handle Trusted Peers (#12492)
* add all changes

* add to peers to watch

* add tests

* Update beacon-chain/p2p/peers/peerdata/store_test.go

* radek's review

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-06-12 14:47:52 +00:00
james-prysm
8d01cf2ec1 change update duties to handle all validators exited check (#12505)
* wip have update duties handle all validators updated

* removing function and adding tests

* removing unnecessary test

* fixing unit test

* gaz

* removing number on wait group

* trying lower threshold to reduce timeout

* testing removal of test to resolve timeout on buildkite

* gaz

* removing test that is breaking buildkite on timeouts; will need to return to re-evaluate the difference between buildkite and the local mock

* addressing feedback

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-12 14:27:52 +00:00
Potuz
e4e315da94 log validation time for blocks (#12514) 2023-06-12 22:06:57 +08:00
terencechain
0a4e42545e Use next slot cache for sync committee (#12287)
* Use next slot cache for sync committee

* RWMutex

* change mutex for last cached state

* feat: change mutex

* test: add db

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-12 04:30:06 +00:00
kasey
6fa2d768b5 Checkpoint sync: get block using state.latest_block_header.slot (#12447)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-11 03:00:38 +00:00
Nishant Das
0f228896b0 Add Patch For Libp2p (#12507)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-06-11 01:59:18 +00:00
terencechain
6896b41963 optimization(proposer rpc): move htr to after broadcast (#12504) 2023-06-09 06:32:29 -07:00
Nishant Das
3bf6abe27c Ignore Phase0 Blocks For Monitor (#12503) 2023-06-09 05:00:36 +00:00
Nishant Das
c1391f0de3 Always Favour Yamux for Multiplexing (#12502) 2023-06-08 04:02:46 +00:00
james-prysm
6672d1499a prysmctl: output proposer settings (#12181)
* wip proposer settings

* WIP validator client APIs

* adding proposer settings output

* adding unit tests

* fixing linting

* fixing deepsource issues

* fixing e2e

* fixing deep source issue

* updating naming to not stutter

* updating bazel

* fixing linting error

* reverting comment

* adding builder settings

* gaz

* Update validator/client/validator.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* adding comments

* adding some tests

* gaz

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/errors.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/options.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/cmd.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/validator/client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/prysmctl/validator/proposer_settings.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/errors.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing feedback

* fixing unit test

* addressing comments

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-06-06 17:03:30 +00:00
Nishant Das
33cf52831c Update Libp2p to v0.27.5 (#12486)
* add deps

* update to v0.27.5 and handle panic
2023-06-06 08:41:15 +08:00
terencechain
d543e9be00 Update spec tests to v1.4.0-alpha.1 (#12489) 2023-06-03 11:17:13 +00:00
Nishant Das
0669050ffa Add Appropriate Size for the Attestation Queue (#12485)
* add tag

* fix off by 1
2023-06-02 11:33:28 +00:00
zghh
ceff0c2024 Fix the bug that returns 500 in the /eth/v1/node/peers interface (#12483)
* Fix the bug that returns 500 in the /eth/v1/node/peers interface

* Update node.go

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-06-02 03:27:17 +00:00
Radosław Kapka
c32b581e8e Add broadcast_validation to block publishing (#12432)
* day 1

* day 2

* day 2+

* day 3

* day 4

* making bazel happy

* PublishBlindedBlockV2

* remove file

* use lock in insertSeenProposerIndex

* remove EquivocationChecker interface

* update deps.bzl

* remove middleware json tags

* go mod tidy

* remove redundant return statements

* validate in handler

* improvements

* extract common code

* remove import

* sync test fix

* Update beacon-chain/rpc/eth/beacon/handlers.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-06-01 11:22:49 +00:00
terencechain
e516a2004f Update next slot cache correctly under late task (#12462) 2023-05-31 08:50:37 -07:00
terencechain
cb65d8af96 Proposer RPC: make setExecutionData better (#12466) 2023-05-31 06:06:32 -07:00
Nishant Das
70152bf476 Copy All Field Tries For Late Blocks (#12461)
* add new thing

* only have it for late blocks

* comments

* change to lock

* add test

* Update beacon-chain/state/state-native/state_test.go

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-05-30 09:57:20 +00:00
Radosław Kapka
8aa688729d Cleanup of ProposerPayloadIDsCache (#12474)
* Cleanup of `ProposerPayloadIDsCache`

* one more comment

* Update beacon-chain/cache/payload_id.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

* Update beacon-chain/cache/payload_id.go

Co-authored-by: terencechain <terence@prysmaticlabs.com>

---------

Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-05-29 16:10:28 +00:00
Preston Van Loon
1ffc92999f p2p: Check peer threshold is met before giving up on ctx deadline (#12446)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-28 13:24:59 +00:00
terencechain
2dcef85f97 Add spec test for v1.4.0-alpha.0 (#12460)
* Fix spec test

* Fix sha

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-25 14:05:43 +00:00
Nishant Das
52da7b3de6 Release Lock Before Panicking (#12464) 2023-05-25 06:42:21 -07:00
terencechain
be16b64535 Remove SubmitBlindBlock context timeout (#12453) 2023-05-24 14:19:23 +00:00
terencechain
f4d3939b62 Add logs for build block times (#12452)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-05-24 13:37:26 +00:00
Nishant Das
245d8a29e0 Optimize Zerohash Comparisons In Forkchoice (#12458) 2023-05-24 09:58:02 +00:00
james-prysm
666188dfea Improve validator import logs (#12429)
* adding small ux improvement

* gaz

* rolling back dir test changes

* Update validator/accounts/accounts_import.go

* adding review suggestion

* missed else part of statement

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-05-23 15:41:41 -05:00
Preston Van Loon
cfa64ae013 Restore disable-peer-scorer flag (#12386)
* Revert "Make Peer Scorer Permanent Default (#12138)"

This reverts commit 4d28d69fd9.

* make peer scoring flag warning scary
2023-05-23 13:53:02 +00:00
Potuz
cd0f814f2e fixed erroneous panic (#12450) 2023-05-23 11:12:31 +00:00
Radosław Kapka
abc81e6dde Merge all block unblinding code into a single unblinder struct (#12240)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-05-23 11:38:52 +02:00
terencechain
6b26183e73 Add missing config yamls for domains (#12442)
* Add missing config yamls for domains

* Fix GetSpec test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 18:02:41 +00:00
Preston Van Loon
7fe935e94d Fix metric name from PR #12430 (#12445)
* Fix metric name from PR #12430

* @potuz can't spell 'unknown'

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 17:43:21 +00:00
Potuz
e0e7c71eb5 Fix sandwich attack on honest reorgs (#12418)
* Fix sandwich attack on honest reorgs

* fix test

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-22 17:23:20 +00:00
Potuz
c80019bd0b remove trivial helper (#12443) 2023-05-22 15:59:16 +00:00
james-prysm
8dfb92c605 reverting expiration logic on validator while using --enable-registration-cache (#12436)
* reverting expiration logic

* gaz
2023-05-22 14:54:09 +00:00
Potuz
9d192a3608 Remove unused function (#12439)
* Remove unused function

* gazelle
2023-05-22 11:09:08 -03:00
Nishant Das
51bde7a845 disable it (#12438) 2023-05-22 19:18:13 +08:00
kasey
385a317902 Revert initsync revert (#12431)
* Revert "Revert "BeaconBlocksByRange and BlobSidecarsByRange consistency (#123… (#12426)"

This reverts commit ddc1e48e05.

* fix metrics bug, add batch.next tests

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-19 16:59:13 +00:00
Potuz
b84dd40ba9 Use forkchoice to validate sync messages faster (#12430)
* Use forkchoice to validate sync messages faster

* add metric
2023-05-19 14:47:39 +00:00
kasey
aeaa72fdc2 fix deadlock between monitor service and init-sync (#12427)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-18 18:35:06 +00:00
kasey
ddc1e48e05 Revert "BeaconBlocksByRange and BlobSidecarsByRange consistency (#123… (#12426)
This reverts commit 73e4bdccbb.

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-18 18:01:26 +00:00
Nishant Das
f91159337b Fix Migration Of State (#12423) 2023-05-18 13:18:56 +00:00
Potuz
537236e1c9 Add aggregation metrics (#12417) 2023-05-17 12:18:59 -07:00
kasey
73e4bdccbb BeaconBlocksByRange and BlobSidecarsByRange consistency (#12383)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-17 12:16:10 +00:00
Potuz
f54bd64bdd Default aggregation ticker times (#12412)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-16 19:38:44 +00:00
james-prysm
d907cae595 persistent validator settings in validator client (#12354)
* WIP

* improving proposer settings store

* WIP persistent validator settings

* WIP persistent validator settings

* changing visibility level

* fixing some deepsource issues

* fixing more deepsource issues

* fixing json marshalling

* fix linting

* fixing tests

* fixing more tests

* fixing more tests

* fixing more tests

* fixing linting

* WIP fixing unit tests

* fixing remaining db tests

* converting json to protobuf

* fixing e2e

* k8s yaml library is used directly

* fixing linting

* fixing broken unit test

* reverting changes on e2e

* fixing linting

* fixing deepsource issue

* resolving some internal comments

* resolving some comments and adding more tests

* adding more unit tests

* gaz

* fixing flaking test

* fixing unit test contamination

* fixing deepsource issue

* resolving review item

* gaz
2023-05-16 14:08:49 -05:00
Potuz
be23773924 Don't use max cover on unaggregated atts nor check subgroup of validated signatures (#12350)
* Don't use max cover on unaggregated atts

* Do not validate signature on the attestation package

* separate nil error checks

* fix unit tests
2023-05-16 17:06:26 +00:00
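
A note on the first change above: an unaggregated attestation has exactly one aggregation bit set, so there is nothing for a max-cover solver to select among, and such attestations can be filtered out before aggregation. A minimal Go sketch of that kind of pre-filter, using a toy bitlist in place of the real go-bitfield type (the helper below is illustrative, not Prysm's code):

package main

import "fmt"

// countBits reports how many validators an attestation's aggregation
// bitlist covers; a toy stand-in for go-bitfield's Bitlist.Count.
func countBits(bits []bool) int {
	n := 0
	for _, b := range bits {
		if b {
			n++
		}
	}
	return n
}

func main() {
	atts := [][]bool{
		{true, false, false}, // unaggregated: a single bit is set
		{true, true, false},  // aggregated: worth running max-cover on
	}
	for i, a := range atts {
		if countBits(a) == 1 {
			fmt.Printf("att %d: unaggregated, skip max-cover\n", i)
		} else {
			fmt.Printf("att %d: aggregated, run max-cover\n", i)
		}
	}
}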
terencechain
29f6de1e96 Flip build block parallel feature flag (#12408) 2023-05-16 08:43:15 -07:00
Potuz
955a21fea4 revert revert of f6764fe62b (#12399) 2023-05-16 11:50:02 +00:00
Preston Van Loon
b4f1fea029 CI: fix docker image tagging (#12407) 2023-05-16 02:10:23 +00:00
kasey
f1b88d005d fix broken slasher service init (#12405)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2023-05-15 17:00:29 -05:00
Preston Van Loon
ee612d958a Update discord invite (#12403) 2023-05-15 13:54:19 +00:00
Nishant Das
09e22538f9 Support Capella Blocks for Tool (#12402)
* fix it

* fix it
2023-05-15 17:59:02 +08:00
terencechain
3b9e974a45 Add epoch and root to not a checkpt in forkchoice log (#12400)
* Add epoch number and root in not a checkpt in forkchoice log

* Update beacon-chain/blockchain/process_attestation_helpers.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* Fix test

* Fix typo

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-05-14 06:02:36 +00:00
Potuz
ad749a40b6 Save to checkpoint cache if the nsc hits (#12398)
* Save to checkpoint cache if the nsc hits

Also move the head check before the checkpoint cache check

* add unit test
2023-05-13 09:54:33 -03:00
terencechain
9b13454457 Metrics: Invert too late and too early att received count (#12392)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 19:45:43 +00:00
Potuz
b9917807d8 Ignore some untimely attestations (#12387)
* Ignore some untimely attestations

* correct child slot check

* consider tips as viable for checkpoints

* deal with canonical blocks

* forkchoice unit tests

* blockchain readonly beacon state

* Ignore some untimely attestations

* correct child slot check

* consider tips as viable for checkpoints

* deal with canonical blocks

* forkchoice unit tests

* blockchain readonly beacon state

* Fix AttestationTargetState mock

* Fix ineffectual assignment lint

* Fix blockchain tests

* Fix build

* Add Nil check

* add comment on lock

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 16:55:33 +00:00
Raul Jordan
e5c9387cd9 Update Github Actions Go Version (#12391)
* update github actions

* use quotes or it is go 1.2 

lol

* Update gosec

* Update gosec

* Update go lint

* fix gosec violations

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-05-12 15:51:20 +00:00
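
The "use quotes or it is go 1.2" fix above comes from YAML's scalar typing: an unquoted 1.20 is parsed as a number, which drops the trailing zero. A small Go sketch of the failure mode (strconv stands in for the YAML parser's number handling):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Unquoted YAML treats `go-version: 1.20` as a float, so actions/setup-go
	// ends up being asked for Go 1.2.
	f, err := strconv.ParseFloat("1.20", 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(f) // 1.2

	// Quoting the value (`go-version: '1.20'`) keeps it a string.
	fmt.Println("1.20") // 1.20
}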
Preston Van Loon
2c3b3b802a Revert "Add a new slot ticker and use it on attestation aggregation" (#12390)
This reverts commit f6764fe62b.
2023-05-12 14:49:37 +00:00
Simon
3ef3e1d13b fix-subnets-oom (#12388)
fix-subnets-oom, close iterator after using it
2023-05-12 07:52:17 -05:00
Nick Sullivan
5c00fcb84f Fix numerous spelling errors and typos in the log messages, comments, and documentation. (#12385)
* Minor typos and spelling fixes (comments, logs, & docs only, no code changes)

* Fix spelling in log message

* Additional spelling tweaks based on review from @prestonvanloon
2023-05-11 20:45:43 +00:00
Nishant Das
aef22bf54e Add In Support For Builder in E2E (#12343)
* fix it up

* add gaz

* add changes in

* finally runs

* fix it

* add progress

* add capella support

* save progress

* remove debug logs

* cleanup

* remove log

* fix flag

* remove unused lock

* gaz

* change

* fix

* lint

* james review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-05-11 11:10:29 -05:00
Potuz
f6764fe62b Add a new slot ticker and use it on attestation aggregation (#12377)
* Add slot ticker with intervals

* add flags for aggregation duration

* misspelling

* hide flags

* fix flags and default durations

* lint

* wait for initial sync

* deep source

* add log

* Preston's review

* fix error message

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-05-10 12:48:51 +00:00
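
A minimal sketch of what a slot ticker with intra-slot intervals can look like, assuming 12-second mainnet slots; the function name, channel shape, and offsets here are illustrative, not Prysm's actual API:

package main

import (
	"fmt"
	"time"
)

// slotIntervalTicker fires once per offset within every slot, e.g. at 1/3
// and 2/3 of the slot for attestation aggregation duties.
func slotIntervalTicker(genesis time.Time, slotDuration time.Duration, offsets []time.Duration) <-chan time.Duration {
	out := make(chan time.Duration)
	go func() {
		for slot := 0; ; slot++ {
			start := genesis.Add(time.Duration(slot) * slotDuration)
			for _, off := range offsets {
				time.Sleep(time.Until(start.Add(off)))
				out <- off
			}
		}
	}()
	return out
}

func main() {
	ticks := slotIntervalTicker(time.Now(), 12*time.Second,
		[]time.Duration{4 * time.Second, 8 * time.Second})
	for i := 0; i < 2; i++ {
		fmt.Println("fired at slot offset", <-ticks)
	}
}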
Preston Van Loon
07db0dc448 CI: Add support for buildbuddy uploads (#12378)
* Add build metadata

* Add buildbuddy flags

* more metadata

* fix latest tag

* fix branch

* revert branch change

* touch a file to trigger build

* remove unknown command

* fix script

* Update latest_version_tag.sh
2023-05-10 12:30:40 +00:00
Preston Van Loon
4b4e213a24 stategen: Pre-populate bls pubkey cache as part of stategen's Resume function (#11482)
* Pre-populate bls pubkey cache as part of state gen's Resume function. This adds some helpers and a benchmark to blst

* Do it async

* fix missing import

* lint

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: rauljordan <raul@prysmaticlabs.com>
2023-05-10 10:44:15 +00:00
kasey
7d9f36985e Fix initialization race (#12374)
* block all the sync startup code on init signal

* don't need chainStarted if everything blocks

* set empty clock by default to work around panics

* remove unused clock, zero-value for init-sync

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-05-10 04:09:15 +00:00
james-prysm
98f8ca4e34 reverting version check on bid header (#12376)
* adding reverting change

* adding additional tests and checks

* removing comment

* updating based on review suggestions

* fixing deepsource issues

* attempting to resolve dependencies

* removing dependency

* accidentally flipped if statement with deepsource fix

* fixing unit test
2023-05-10 08:45:09 +08:00
james-prysm
535b38395e migrating code from PR#12343 (#12371) 2023-05-08 09:33:26 -05:00
terencechain
4bd7e8aefd WeiToGwei copy input big.int (#12370) 2023-05-07 15:15:08 +00:00
Raul Jordan
6f383f272a Do Not Panic on Broadcasting Nil Object (#12369) 2023-05-07 05:00:30 +00:00
terencechain
dbb8279fe6 Ran update-go-pbs.sh (#12359)
* Ran update-go-pbs.sh

* Fix import
2023-05-05 17:44:51 +00:00
Nishant Das
b6625554dd fix build (#12358) 2023-05-05 08:32:56 -05:00
Radosław Kapka
bd833e1c12 Use v1alpha1 server in block production (#12336)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2023-05-04 19:52:41 +02:00
320 changed files with 12056 additions and 4154 deletions

View File

@@ -43,4 +43,12 @@ build --flaky_test_attempts=5
# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
# Build metadata
build --build_metadata=ROLE=CI
build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
build --workspace_status_command=./hack/workspace_status_ci.sh
# Buildbuddy
build --bes_results_url=https://app.buildbuddy.io/invocation/
build --bes_backend=grpcs://remote.buildbuddy.io

View File

@@ -26,14 +26,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -43,16 +43,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Go 1.19
- name: Set up Go 1.20
uses: actions/setup-go@v3
with:
go-version: 1.19
go-version: '1.20'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.50.1
version: v1.52.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -62,7 +62,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: 1.19
go-version: '1.20'
id: go
- name: Check out code into the Go module directory

View File

@@ -4,14 +4,14 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.3.0](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.3.0-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
[![Execution_API_Version 1.0.0-beta.2](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.2-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/prysmaticlabs)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).
### Staking on Mainnet

View File

@@ -205,7 +205,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.3.0"
consensus_spec_version = "v1.4.0-alpha.1"
bls_test_version = "v0.1.1"
@@ -221,7 +221,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
sha256 = "1118a663be4a00ba00f0635eb20287157f2b2f993aed64335bfbcd04af424c2b",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -237,7 +237,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
sha256 = "acde6e10940d14f22277eda5b55b65a24623ac88e4c7a2e34134a6069f5eea82",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -253,7 +253,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
sha256 = "49c022f3a3478cea849ba8f877a9f7e4c1ded549edddc09993550bbc5bb192e1",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -268,7 +268,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
sha256 = "c3e246ff01f6b7b9e9e41939954a6ff89dfca7297415f88781809165fa83267c",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

api/client/BUILD.bazel (new file, +20 lines)
View File

@@ -0,0 +1,20 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"client.go",
"errors.go",
"options.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client",
visibility = ["//visibility:public"],
deps = ["@com_github_pkg_errors//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["client_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
)

View File

@@ -6,11 +6,11 @@ go_library(
"checkpoint.go",
"client.go",
"doc.go",
"errors.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -39,6 +39,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -6,10 +6,12 @@ import (
"path"
"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/io/file"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
@@ -18,6 +20,8 @@ import (
"golang.org/x/mod/semver"
)
var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
@@ -74,37 +78,40 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
if err != nil {
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
}
if s.Slot() != s.LatestBlockHeader().Slot {
return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
}
sr, err := s.HashTreeRoot(ctx)
slot := s.LatestBlockHeader().Slot
bb, err := client.GetBlock(ctx, IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
header := s.LatestBlockHeader()
header.StateRoot = sr[:]
br, err := header.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}
bb, err := client.GetBlock(ctx, IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
realBlockRoot, err := b.Block().HashTreeRoot()
br, err := b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
}
bodyRoot, err := b.Block().Body().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
}
log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
if sbr != bodyRoot {
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
}
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
}
log.
WithField("block_slot", b.Block().Slot()).
WithField("state_slot", s.Slot()).
WithField("state_root", sr).
WithField("block_root", br).
Info("Downloaded checkpoint sync state and block.")
return &OriginData{
st: s,
b: b,
@@ -140,7 +147,7 @@ func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*We
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, ErrNotOK) {
if !errors.Is(err, base.ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method

View File

@@ -7,9 +7,9 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
@@ -66,11 +66,7 @@ func TestMarshalToEnvelope(t *testing.T) {
}
func TestFallbackVersionCheck(t *testing.T) {
c := &Client{
hc: &http.Client{},
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
c.hc.Transport = &testRT{rt: func(req *http.Request) (*http.Response, error) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
@@ -88,12 +84,13 @@ func TestFallbackVersionCheck(t *testing.T) {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}
@@ -170,44 +167,41 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
Epoch: epoch,
}
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
Root string `json:"root"`
}{
Epoch: fmt.Sprintf("%d", slots.ToEpoch(b.Block().Slot())),
Root: fmt.Sprintf("%#x", bRoot),
}
wsr := struct {
Checkpoint interface{} `json:"ws_checkpoint"`
StateRoot string `json:"state_root"`
}{
Checkpoint: cp,
StateRoot: fmt.Sprintf("%#x", wRoot),
}
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -266,42 +260,39 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Lighthouse/v0.1.5 (Linux x86_64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
require.NoError(t, err)
@@ -315,21 +306,16 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
st, expectedEpoch := defaultTestHeadState(t, params.MainnetConfig())
serialized, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == renderGetStatePath(IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
require.Equal(t, expectedEpoch, actualEpoch)
@@ -448,29 +434,24 @@ func TestDownloadFinalizedData(t *testing.T) {
ms, err := st.MarshalSSZ()
require.NoError(t, err)
hc := &http.Client{
Transport: &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromRoot(br)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}},
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)

View File

@@ -5,8 +5,6 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"path"
@@ -14,8 +12,8 @@ import (
"sort"
"strconv"
"text/template"
"time"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
@@ -54,8 +52,6 @@ const (
IdFinalized StateOrBlockId = "finalized"
)
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// IdFromRoot encodes a block root in the format expected by the API in places where a root can be used to identify
// a BeaconState or SignedBeaconBlock.
func IdFromRoot(r [32]byte) StateOrBlockId {
@@ -85,96 +81,22 @@ func idTemplate(ts string) func(StateOrBlockId) string {
return f
}
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
}
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
baseURL *url.URL
*client.Client
}
// NewClient constructs a new client with the provided options (ex WithTimeout).
// `host` is the base host + port used to construct request urls. This value can be
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
// NewClient returns a new Client that includes functions for REST calls to the Beacon API.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as url (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: fmt.Sprintf("%s:%s", host, port), Scheme: "http"}, nil
}
// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
type reqOption func(*http.Request)
func withSSZEncoding() reqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
}
return b, nil
}
func renderGetBlockPath(id StateOrBlockId) string {
return path.Join(getSignedBlockPath, string(id))
return &Client{c}, nil
}
// GetBlock retrieves the SignedBeaconBlock for the given block id.
@@ -184,7 +106,7 @@ func renderGetBlockPath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
blockPath := renderGetBlockPath(blockId)
b, err := c.get(ctx, blockPath, withSSZEncoding())
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
}
@@ -199,7 +121,7 @@ var getBlockRootTpl = idTemplate(getBlockRootPath)
// for the named identifiers.
func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]byte, error) {
rootPath := getBlockRootTpl(blockId)
b, err := c.get(ctx, rootPath)
b, err := c.Get(ctx, rootPath)
if err != nil {
return [32]byte{}, errors.Wrapf(err, "error requesting block root by id = %s", blockId)
}
@@ -222,7 +144,7 @@ var getForkTpl = idTemplate(getForkForStatePath)
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
// for the named identifiers.
func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fork, error) {
body, err := c.get(ctx, getForkTpl(stateId))
body, err := c.Get(ctx, getForkTpl(stateId))
if err != nil {
return nil, errors.Wrapf(err, "error requesting fork by state id = %s", stateId)
}
@@ -238,7 +160,7 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
// GetForkSchedule retrieves all forks, past, present, and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.get(ctx, getForkSchedulePath)
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
@@ -256,7 +178,7 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
// GetConfigSpec retrieves the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
body, err := c.get(ctx, getConfigSpecPath)
body, err := c.Get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
@@ -279,7 +201,7 @@ var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*
func parseNodeVersion(v string) (*NodeVersion, error) {
groups := versionRE.FindStringSubmatch(v)
if len(groups) != 4 {
return nil, errors.Wrapf(ErrInvalidNodeVersion, "could not be parsed: %s", v)
return nil, errors.Wrapf(client.ErrInvalidNodeVersion, "could not be parsed: %s", v)
}
return &NodeVersion{
implementation: groups[1],
@@ -291,7 +213,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
// similar to a HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
b, err := c.get(ctx, getNodeVersionPath)
b, err := c.Get(ctx, getNodeVersionPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting node version")
}
@@ -318,7 +240,7 @@ func renderGetStatePath(id StateOrBlockId) string {
// The return value contains the ssz-encoded bytes.
func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
statePath := path.Join(getStatePath, string(stateId))
b, err := c.get(ctx, statePath, withSSZEncoding())
b, err := c.Get(ctx, statePath, client.WithSSZEncoding())
if err != nil {
return nil, errors.Wrapf(err, "error requesting state by id = %s", stateId)
}
@@ -331,7 +253,7 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
// - finds the highest non-skipped block preceding the epoch
// - returns the htr of the found block along with the value of state_root from the block
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
body, err := c.get(ctx, getWeakSubjectivityPath)
body, err := c.Get(ctx, getWeakSubjectivityPath)
if err != nil {
return nil, err
}
@@ -362,7 +284,7 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
u := c.BaseURL().ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
body, err := json.Marshal(request)
if err != nil {
return errors.Wrap(err, "failed to marshal JSON")
@@ -372,7 +294,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
return errors.Wrap(err, "invalid format, failed to create new POST request object")
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.hc.Do(req)
resp, err := c.Do(req)
if err != nil {
return err
}
@@ -401,7 +323,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apim
// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
// Returns a struct representation of json response.
func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
body, err := c.get(ctx, changeBLStoExecutionPath)
body, err := c.Get(ctx, changeBLStoExecutionPath)
if err != nil {
return nil, err
}
@@ -413,23 +335,6 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.B
return poolResponse, nil
}
func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
type forkResponse struct {
PreviousVersion string `json:"previous_version"`
CurrentVersion string `json:"current_version"`
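
With this refactor, construction of the beacon client goes through the shared api/client package. A short usage sketch, assuming a WithTimeout option lives in that package alongside the WithRoundTripper option used in the updated tests:

package main

import (
	"context"
	"log"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

func main() {
	// NewClient now delegates URL parsing and option handling to api/client;
	// a bare hostname without a port fails with client.ErrMalformedHostname.
	c, err := beacon.NewClient("http://localhost:3500", client.WithTimeout(10*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	v, err := c.GetNodeVersion(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("beacon node version: %+v", v)
}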

View File

@@ -4,6 +4,7 @@ import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -17,17 +18,17 @@ func TestParseNodeVersion(t *testing.T) {
{
name: "empty string",
v: "",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "Prysm as the version string",
v: "Prysm",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "semver only",
v: "v2.0.6",
err: ErrInvalidNodeVersion,
err: client.ErrInvalidNodeVersion,
},
{
name: "complete version",
@@ -91,7 +92,7 @@ func TestValidHostname(t *testing.T) {
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
err: client.ErrMalformedHostname,
},
{
name: "hostname with port",
@@ -132,7 +133,7 @@ func TestValidHostname(t *testing.T) {
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.baseURL.ResolveReference(&url.URL{Path: c.path}).String())
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}

View File

@@ -1,13 +0,0 @@
package beacon
import "github.com/pkg/errors"
// ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version api response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")

View File

@@ -3,7 +3,6 @@ package builder
import (
"math/big"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -162,9 +161,6 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
if b.p == nil {
return nil, errors.New("builder bid is nil")
}
// We have to convert big endian to little endian because the value is coming from the execution layer.
v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))

View File

@@ -11,7 +11,6 @@ import (
"net/url"
"strings"
"text/template"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -36,7 +35,6 @@ const (
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
var errMalformedRequest = errors.New("required request data are missing")
var errNotBlinded = errors.New("submitted block is not blinded")
var submitBlindedBlockTimeout = 3 * time.Second
// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)
@@ -292,8 +290,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
}
@@ -325,8 +321,6 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
}
ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
defer cancel()
versionOpt := func(r *http.Request) {
r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
}

View File

@@ -14,14 +14,17 @@ import (
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// SignedValidatorRegistration is a struct for signed validator registrations.
type SignedValidatorRegistration struct {
*eth.SignedValidatorRegistrationV1
}
// ValidatorRegistration is a struct for validator registrations.
type ValidatorRegistration struct {
*eth.ValidatorRegistrationV1
}
// MarshalJSON returns a json representation copy of signed validator registration.
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *ValidatorRegistration `json:"message"`
@@ -32,6 +35,7 @@ func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON returns a byte representation of signed validator registration from json.
func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.SignedValidatorRegistrationV1 == nil {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
@@ -48,6 +52,7 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
return nil
}
// MarshalJSON returns a json representation copy of validator registration.
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -62,6 +67,7 @@ func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON returns a byte representation of validator registration from json.
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
if r.ValidatorRegistrationV1 == nil {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
@@ -92,6 +98,7 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
var errInvalidUint256 = errors.New("invalid Uint256")
var errDecodeUint256 = errors.New("unable to decode into Uint256")
// Uint256 is a wrapper representation of big.Int.
type Uint256 struct {
*big.Int
}
@@ -118,7 +125,7 @@ func sszBytesToUint256(b []byte) (Uint256, error) {
return Uint256{Int: bi}, nil
}
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
// SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256.
func (s Uint256) SSZBytes() []byte {
if !isValidUint256(s.Int) {
return []byte{}
@@ -126,6 +133,7 @@ func (s Uint256) SSZBytes() []byte {
return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
}
// UnmarshalJSON takes in a byte array and unmarshals the value in Uint256
func (s *Uint256) UnmarshalJSON(t []byte) error {
start := 0
end := len(t)
@@ -138,6 +146,7 @@ func (s *Uint256) UnmarshalJSON(t []byte) error {
return s.UnmarshalText(t[start:end])
}
// UnmarshalText takes in a byte array and unmarshals the text in Uint256
func (s *Uint256) UnmarshalText(t []byte) error {
if s.Int == nil {
s.Int = big.NewInt(0)
@@ -153,6 +162,7 @@ func (s *Uint256) UnmarshalText(t []byte) error {
return nil
}
// MarshalJSON returns a json byte representation of Uint256.
func (s Uint256) MarshalJSON() ([]byte, error) {
t, err := s.MarshalText()
if err != nil {
@@ -163,6 +173,7 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
return t, nil
}
// MarshalText returns a text byte representation of Uint256.
func (s Uint256) MarshalText() ([]byte, error) {
if !isValidUint256(s.Int) {
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
@@ -170,22 +181,27 @@ func (s Uint256) MarshalText() ([]byte, error) {
return []byte(s.String()), nil
}
// Uint64String is a custom type that allows marshalling from text to uint64 and vice versa.
type Uint64String uint64
// UnmarshalText takes a byte array and unmarshals the text in Uint64String.
func (s *Uint64String) UnmarshalText(t []byte) error {
u, err := strconv.ParseUint(string(t), 10, 64)
*s = Uint64String(u)
return err
}
// MarshalText returns a byte representation of the text from Uint64String.
func (s Uint64String) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%d", s)), nil
}
// VersionResponse is a JSON representation of a field in the builder API header response.
type VersionResponse struct {
Version string `json:"version"`
}
// ExecHeaderResponse is a JSON representation of the builder API header response for Bellatrix.
type ExecHeaderResponse struct {
Version string `json:"version"`
Data struct {
@@ -194,6 +210,7 @@ type ExecHeaderResponse struct {
} `json:"data"`
}
// ToProto returns a SignedBuilderBid from ExecHeaderResponse for Bellatrix.
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
@@ -205,6 +222,7 @@ func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
}, nil
}
// ToProto returns a BuilderBid Proto for Bellatrix.
func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
header, err := bb.Header.ToProto()
if err != nil {
@@ -217,31 +235,34 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
}, nil
}
// ToProto returns an ExecutionPayloadHeader for Bellatrix.
func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
return &v1.ExecutionPayloadHeader{
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
ReceiptsRoot: h.ReceiptsRoot,
LogsBloom: h.LogsBloom,
PrevRandao: h.PrevRandao,
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: h.ExtraData,
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
BlockHash: h.BlockHash,
TransactionsRoot: h.TransactionsRoot,
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
}, nil
}
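The recurring change in this hunk, wrapping every byte field in bytesutil.SafeCopyBytes, exists to break slice aliasing: without the copies, the proto and the JSON-decoded struct would share backing arrays, so mutating one would silently corrupt the other. A minimal sketch of the hazard, using a stand-in helper rather than the actual bytesutil implementation:

```go
package main

import "fmt"

// safeCopy is a stand-in for bytesutil.SafeCopyBytes: it returns an
// independent copy so later writes to the source cannot leak through.
func safeCopy(b []byte) []byte {
	if b == nil {
		return nil
	}
	out := make([]byte, len(b))
	copy(out, b)
	return out
}

func main() {
	src := []byte{0x01, 0x02}
	aliased := src          // shares src's backing array
	copied := safeCopy(src) // independent memory
	src[0] = 0xff
	fmt.Printf("%#x %#x\n", aliased[0], copied[0]) // 0xff 0x1
}
```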
// BuilderBid is part of ExecHeaderResponse for Bellatrix.
type BuilderBid struct {
Header *ExecutionPayloadHeader `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// ExecutionPayloadHeader is a field in BuilderBid.
type ExecutionPayloadHeader struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -260,6 +281,7 @@ type ExecutionPayloadHeader struct {
*v1.ExecutionPayloadHeader
}
// MarshalJSON returns the JSON bytes representation of ExecutionPayloadHeader.
func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeader
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas)
@@ -284,6 +306,7 @@ func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON takes in a JSON byte array and sets ExecutionPayloadHeader.
func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeader
uc := &UnmarshalCaller{}
@@ -297,11 +320,13 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
return err
}
// ExecPayloadResponse is the response of the builder API /eth/v1/builder/blinded_blocks for Bellatrix.
type ExecPayloadResponse struct {
Version string `json:"version"`
Data ExecutionPayload `json:"data"`
}
// ExecutionPayload is a field of ExecPayloadResponse.
type ExecutionPayload struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -319,33 +344,104 @@ type ExecutionPayload struct {
Transactions []hexutil.Bytes `json:"transactions"`
}
// ToProto returns an ExecutionPayload Proto from ExecPayloadResponse.
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
return r.Data.ToProto()
}
// ToProto returns an ExecutionPayload Proto.
func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = p.Transactions[i]
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
}
return &v1.ExecutionPayload{
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
BlockHash: p.BlockHash,
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
Transactions: txs,
}, nil
}
// FromProto converts a proto execution payload type to our builder
// compatible payload type.
func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayload{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = bytesutil.SafeCopyBytes(payload.Transactions[i])
}
return ExecutionPayload{
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
BaseFeePerGas: bFee,
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
Transactions: txs,
}, nil
}
// FromProtoCapella converts a proto execution payload type for Capella to our
// builder compatible payload type.
func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
if err != nil {
return ExecutionPayloadCapella{}, err
}
txs := make([]hexutil.Bytes, len(payload.Transactions))
for i := range payload.Transactions {
txs[i] = bytesutil.SafeCopyBytes(payload.Transactions[i])
}
withdrawals := make([]Withdrawal, len(payload.Withdrawals))
for i, w := range payload.Withdrawals {
withdrawals[i] = Withdrawal{
Index: Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
Address: bytesutil.SafeCopyBytes(w.Address),
Amount: Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
}
}
return ExecutionPayloadCapella{
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
BlockNumber: Uint64String(payload.BlockNumber),
GasLimit: Uint64String(payload.GasLimit),
GasUsed: Uint64String(payload.GasUsed),
Timestamp: Uint64String(payload.Timestamp),
ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData),
BaseFeePerGas: bFee,
BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash),
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
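The converters above compose into a proto → builder type → proto round-trip that yields freshly allocated byte slices. A hypothetical sketch (the import paths and zeroed field values are assumptions for illustration):

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
)

func main() {
	payload := &v1.ExecutionPayloadCapella{
		ParentHash:    make([]byte, 32),
		FeeRecipient:  make([]byte, 20),
		StateRoot:     make([]byte, 32),
		ReceiptsRoot:  make([]byte, 32),
		LogsBloom:     make([]byte, 256),
		PrevRandao:    make([]byte, 32),
		BaseFeePerGas: make([]byte, 32), // little-endian SSZ bytes; zero is valid
		BlockHash:     make([]byte, 32),
	}
	jsonPayload, err := builder.FromProtoCapella(payload) // proto -> JSON type
	if err != nil {
		panic(err)
	}
	roundTripped, err := jsonPayload.ToProto() // and back, with copied slices
	if err != nil {
		panic(err)
	}
	fmt.Println(len(roundTripped.BaseFeePerGas)) // 32
}
```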
// ExecHeaderResponseCapella is the response of the builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
type ExecHeaderResponseCapella struct {
Data struct {
Signature hexutil.Bytes `json:"signature"`
@@ -353,6 +449,7 @@ type ExecHeaderResponseCapella struct {
} `json:"data"`
}
// ToProto returns a SignedBuilderBidCapella Proto from ExecHeaderResponseCapella.
func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
@@ -360,10 +457,11 @@ func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, e
}
return &eth.SignedBuilderBidCapella{
Message: bb,
Signature: ehr.Data.Signature,
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
}, nil
}
// ToProto returns a BuilderBidCapella Proto.
func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
header, err := bb.Header.ToProto()
if err != nil {
@@ -371,37 +469,40 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
}
return &eth.BuilderBidCapella{
Header: header,
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
// ToProto returns an ExecutionPayloadHeaderCapella Proto.
func (h *ExecutionPayloadHeaderCapella) ToProto() (*v1.ExecutionPayloadHeaderCapella, error) {
return &v1.ExecutionPayloadHeaderCapella{
ParentHash: h.ParentHash,
FeeRecipient: h.FeeRecipient,
StateRoot: h.StateRoot,
ReceiptsRoot: h.ReceiptsRoot,
LogsBloom: h.LogsBloom,
PrevRandao: h.PrevRandao,
ParentHash: bytesutil.SafeCopyBytes(h.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(h.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(h.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(h.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(h.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(h.PrevRandao),
BlockNumber: uint64(h.BlockNumber),
GasLimit: uint64(h.GasLimit),
GasUsed: uint64(h.GasUsed),
Timestamp: uint64(h.Timestamp),
ExtraData: h.ExtraData,
BaseFeePerGas: h.BaseFeePerGas.SSZBytes(),
BlockHash: h.BlockHash,
TransactionsRoot: h.TransactionsRoot,
WithdrawalsRoot: h.WithdrawalsRoot,
ExtraData: bytesutil.SafeCopyBytes(h.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(h.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(h.BlockHash),
TransactionsRoot: bytesutil.SafeCopyBytes(h.TransactionsRoot),
WithdrawalsRoot: bytesutil.SafeCopyBytes(h.WithdrawalsRoot),
}, nil
}
// BuilderBidCapella is a field of ExecHeaderResponseCapella.
type BuilderBidCapella struct {
Header *ExecutionPayloadHeaderCapella `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// ExecutionPayloadHeaderCapella is a field in BuilderBidCapella.
type ExecutionPayloadHeaderCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -421,6 +522,7 @@ type ExecutionPayloadHeaderCapella struct {
*v1.ExecutionPayloadHeaderCapella
}
// MarshalJSON returns a JSON byte representation of ExecutionPayloadHeaderCapella.
func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
type MarshalCaller ExecutionPayloadHeaderCapella
baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderCapella.BaseFeePerGas)
@@ -446,6 +548,7 @@ func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
})
}
// UnmarshalJSON takes a JSON byte array and sets ExecutionPayloadHeaderCapella.
func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
type UnmarshalCaller ExecutionPayloadHeaderCapella
uc := &UnmarshalCaller{}
@@ -459,11 +562,13 @@ func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
return err
}
// ExecPayloadResponseCapella is the response of the builder API /eth/v1/builder/blinded_blocks for Capella.
type ExecPayloadResponseCapella struct {
Version string `json:"version"`
Data ExecutionPayloadCapella `json:"data"`
}
// ExecutionPayloadCapella is a field of ExecPayloadResponseCapella.
type ExecutionPayloadCapella struct {
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
@@ -482,43 +587,46 @@ type ExecutionPayloadCapella struct {
Withdrawals []Withdrawal `json:"withdrawals"`
}
// ToProto returns an ExecutionPayloadCapella Proto.
func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
return r.Data.ToProto()
}
// ToProto returns an ExecutionPayloadCapella Proto.
func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
txs := make([][]byte, len(p.Transactions))
for i := range p.Transactions {
txs[i] = p.Transactions[i]
txs[i] = bytesutil.SafeCopyBytes(p.Transactions[i])
}
withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
for i, w := range p.Withdrawals {
withdrawals[i] = &v1.Withdrawal{
Index: w.Index.Uint64(),
ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
Address: w.Address,
Address: bytesutil.SafeCopyBytes(w.Address),
Amount: w.Amount.Uint64(),
}
}
return &v1.ExecutionPayloadCapella{
ParentHash: p.ParentHash,
FeeRecipient: p.FeeRecipient,
StateRoot: p.StateRoot,
ReceiptsRoot: p.ReceiptsRoot,
LogsBloom: p.LogsBloom,
PrevRandao: p.PrevRandao,
ParentHash: bytesutil.SafeCopyBytes(p.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(p.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(p.StateRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(p.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(p.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(p.PrevRandao),
BlockNumber: uint64(p.BlockNumber),
GasLimit: uint64(p.GasLimit),
GasUsed: uint64(p.GasUsed),
Timestamp: uint64(p.Timestamp),
ExtraData: p.ExtraData,
BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
BlockHash: p.BlockHash,
ExtraData: bytesutil.SafeCopyBytes(p.ExtraData),
BaseFeePerGas: bytesutil.SafeCopyBytes(p.BaseFeePerGas.SSZBytes()),
BlockHash: bytesutil.SafeCopyBytes(p.BlockHash),
Transactions: txs,
Withdrawals: withdrawals,
}, nil
}
// Withdrawal is a field of ExecutionPayloadCapella.
type Withdrawal struct {
Index Uint256 `json:"index"`
ValidatorIndex Uint256 `json:"validator_index"`
@@ -526,18 +634,22 @@ type Withdrawal struct {
Amount Uint256 `json:"amount"`
}
// SignedBlindedBeaconBlockBellatrix is the request object for the builder API /eth/v1/builder/blinded_blocks.
type SignedBlindedBeaconBlockBellatrix struct {
*eth.SignedBlindedBeaconBlockBellatrix
}
// BlindedBeaconBlockBellatrix is a field in SignedBlindedBeaconBlockBellatrix.
type BlindedBeaconBlockBellatrix struct {
*eth.BlindedBeaconBlockBellatrix
}
// BlindedBeaconBlockBodyBellatrix is a field in BlindedBeaconBlockBellatrix.
type BlindedBeaconBlockBodyBellatrix struct {
*eth.BlindedBeaconBlockBodyBellatrix
}
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockBellatrix.
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockBellatrix `json:"message"`
@@ -548,6 +660,7 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBellatrix.
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -564,10 +677,12 @@ func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
})
}
// ProposerSlashing is a field in BlindedBeaconBlockBodyCapella.
type ProposerSlashing struct {
*eth.ProposerSlashing
}
// MarshalJSON returns a JSON byte array representation of ProposerSlashing.
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
@@ -578,10 +693,12 @@ func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
})
}
// SignedBeaconBlockHeader is a field of ProposerSlashing.
type SignedBeaconBlockHeader struct {
*eth.SignedBeaconBlockHeader
}
// MarshalJSON returns a JSON byte array representation of SignedBeaconBlockHeader.
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Header *BeaconBlockHeader `json:"message"`
@@ -592,10 +709,12 @@ func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
})
}
// BeaconBlockHeader is a field of SignedBeaconBlockHeader.
type BeaconBlockHeader struct {
*eth.BeaconBlockHeader
}
// MarshalJSON returns a JSON byte array representation of BeaconBlockHeader.
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -612,10 +731,12 @@ func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
})
}
// IndexedAttestation is a field of AttesterSlashing.
type IndexedAttestation struct {
*eth.IndexedAttestation
}
// MarshalJSON returns a JSON byte array representation of IndexedAttestation.
func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
indices := make([]string, len(a.IndexedAttestation.AttestingIndices))
for i := range a.IndexedAttestation.AttestingIndices {
@@ -632,10 +753,12 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
})
}
// AttesterSlashing is a field of a Beacon Block Body.
type AttesterSlashing struct {
*eth.AttesterSlashing
}
// MarshalJSON returns a JSON byte array representation of AttesterSlashing.
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Attestation1 *IndexedAttestation `json:"attestation_1"`
@@ -646,10 +769,12 @@ func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
})
}
// Checkpoint is a field of AttestationData.
type Checkpoint struct {
*eth.Checkpoint
}
// MarshalJSON returns a JSON byte array representation of Checkpoint.
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
@@ -660,10 +785,12 @@ func (c *Checkpoint) MarshalJSON() ([]byte, error) {
})
}
// AttestationData is a field of IndexedAttestation.
type AttestationData struct {
*eth.AttestationData
}
// MarshalJSON returns a JSON byte array representation of AttestationData.
func (a *AttestationData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -680,10 +807,12 @@ func (a *AttestationData) MarshalJSON() ([]byte, error) {
})
}
// Attestation is a field of Beacon Block Body.
type Attestation struct {
*eth.Attestation
}
// MarshalJSON returns a JSON byte array representation of Attestation.
func (a *Attestation) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
@@ -696,10 +825,12 @@ func (a *Attestation) MarshalJSON() ([]byte, error) {
})
}
// DepositData is a field of Deposit.
type DepositData struct {
*eth.Deposit_Data
}
// MarshalJSON returns a JSON byte array representation of DepositData.
func (d *DepositData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
PublicKey hexutil.Bytes `json:"pubkey"`
@@ -714,10 +845,12 @@ func (d *DepositData) MarshalJSON() ([]byte, error) {
})
}
// Deposit is a field of Beacon Block Body.
type Deposit struct {
*eth.Deposit
}
// MarshalJSON returns a JSON byte array representation of Deposit.
func (d *Deposit) MarshalJSON() ([]byte, error) {
proof := make([]hexutil.Bytes, len(d.Proof))
for i := range d.Proof {
@@ -732,10 +865,12 @@ func (d *Deposit) MarshalJSON() ([]byte, error) {
})
}
// SignedVoluntaryExit is a field of Beacon Block Body.
type SignedVoluntaryExit struct {
*eth.SignedVoluntaryExit
}
// MarshalJSON returns a JSON byte array representation of SignedVoluntaryExit.
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *VoluntaryExit `json:"message"`
@@ -746,10 +881,12 @@ func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
})
}
// VoluntaryExit is a field in SignedVoluntaryExit.
type VoluntaryExit struct {
*eth.VoluntaryExit
}
// MarshalJSON returns a JSON byte array representation of VoluntaryExit.
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch"`
@@ -760,10 +897,12 @@ func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
})
}
// SyncAggregate is a field of Beacon Block Body.
type SyncAggregate struct {
*eth.SyncAggregate
}
// MarshalJSON returns a JSON byte array representation of SyncAggregate.
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
@@ -774,10 +913,12 @@ func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
})
}
// Eth1Data is a field of Beacon Block Body.
type Eth1Data struct {
*eth.Eth1Data
}
// MarshalJSON returns a JSON byte array representation of Eth1Data.
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
DepositRoot hexutil.Bytes `json:"deposit_root"`
@@ -790,6 +931,7 @@ func (e *Eth1Data) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyBellatrix.
func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits))
for i := range b.BlindedBeaconBlockBodyBellatrix.VoluntaryExits {
@@ -836,10 +978,12 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
})
}
// SignedBLSToExecutionChange is a field in Beacon Block Body for Capella and above.
type SignedBLSToExecutionChange struct {
*eth.SignedBLSToExecutionChange
}
// MarshalJSON returns a JSON byte array representation of SignedBLSToExecutionChange.
func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BLSToExecutionChange `json:"message"`
@@ -850,10 +994,12 @@ func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
})
}
// BLSToExecutionChange is a field in SignedBLSToExecutionChange.
type BLSToExecutionChange struct {
*eth.BLSToExecutionChange
}
// MarshalJSON returns a JSON byte array representation of BLSToExecutionChange.
func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
ValidatorIndex string `json:"validator_index"`
@@ -866,18 +1012,22 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
})
}
// SignedBlindedBeaconBlockCapella is part of the request object sent to the builder API /eth/v1/builder/blinded_blocks for Capella.
type SignedBlindedBeaconBlockCapella struct {
*eth.SignedBlindedBeaconBlockCapella
}
// BlindedBeaconBlockCapella is a field in SignedBlindedBeaconBlockCapella.
type BlindedBeaconBlockCapella struct {
*eth.BlindedBeaconBlockCapella
}
// BlindedBeaconBlockBodyCapella is a field in BlindedBeaconBlockCapella.
type BlindedBeaconBlockBodyCapella struct {
*eth.BlindedBeaconBlockBodyCapella
}
// MarshalJSON returns a JSON byte array representation of SignedBlindedBeaconBlockCapella.
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockCapella `json:"message"`
@@ -888,6 +1038,7 @@ func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockCapella.
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
@@ -904,6 +1055,7 @@ func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
})
}
// MarshalJSON returns a JSON byte array representation of BlindedBeaconBlockBodyCapella.
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
for i := range b.VoluntaryExits {
@@ -956,6 +1108,7 @@ func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
})
}
// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
Code int `json:"code"`
Message string `json:"message"`

api/client/client.go

@@ -0,0 +1,97 @@
package client
import (
"context"
"io"
"net"
"net/http"
"net/url"
"github.com/pkg/errors"
)
// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
}
// NewClient constructs a new client with the provided options (e.g. WithTimeout).
// `host` is the base host + port used to construct request URLs. This value can be
// a URL string, or NewClient will assume a plain http endpoint if just `host:port` is given.
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
u, err := urlForHost(host)
if err != nil {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
}
for _, o := range opts {
o(c)
}
return c, nil
}
// Token returns the bearer token used for JWT authentication.
func (c *Client) Token() string {
return c.token
}
// BaseURL returns the base URL of the client.
func (c *Client) BaseURL() *url.URL {
return c.baseURL
}
// Do executes the request against the underlying HTTP client.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
return c.hc.Do(req)
}
func urlForHost(h string) (*url.URL, error) {
// try to parse as a URL (being permissive)
u, err := url.Parse(h)
if err == nil && u.Host != "" {
return u, nil
}
// try to parse as host:port
host, port, err := net.SplitHostPort(h)
if err != nil {
return nil, ErrMalformedHostname
}
return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil
}
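urlForHost is deliberately permissive: a full URL keeps its scheme, a bare `host:port` is assumed to be plain http, and a host without a port is rejected. A quick sketch of that behavior through the exported constructor (hosts are placeholders):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	c1, _ := client.NewClient("example.com:3500") // bare host:port, http assumed
	fmt.Println(c1.BaseURL().String())            // http://example.com:3500

	c2, _ := client.NewClient("https://example.com:3500") // scheme preserved
	fmt.Println(c2.BaseURL().Scheme)                      // https

	_, err := client.NewClient("example.com")                 // no port
	fmt.Println(errors.Is(err, client.ErrMalformedHostname)) // true
}
```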
// NodeURL returns a human-readable string representation of the beacon node base URL.
func (c *Client) NodeURL() string {
return c.baseURL.String()
}
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
for _, o := range opts {
o(req)
}
r, err := c.hc.Do(req)
if err != nil {
return nil, err
}
defer func() {
err = r.Body.Close()
}()
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
return b, nil
}
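Get resolves the path against the base URL, applies any request options, and returns the raw body only for 200 responses. A hypothetical call against a local beacon node (the host is a placeholder; /eth/v1/node/version is the standard route referenced by ErrInvalidNodeVersion below):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	c, err := client.NewClient("localhost:3500")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Non-200 responses come back as errors wrapping ErrNotOK.
	body, err := c.Get(ctx, "/eth/v1/node/version")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```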

api/client/client_test.go

@@ -0,0 +1,48 @@
package client
import (
"net/url"
"testing"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestValidHostname(t *testing.T) {
cases := []struct {
name string
hostArg string
path string
joined string
err error
}{
{
name: "hostname without port",
hostArg: "mydomain.org",
err: ErrMalformedHostname,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
cl, err := NewClient(c.hostArg)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, c.joined, cl.BaseURL().ResolveReference(&url.URL{Path: c.path}).String())
})
}
}
func TestWithAuthenticationToken(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500", WithAuthenticationToken("my token"))
require.NoError(t, err)
require.Equal(t, cl.Token(), "my token")
}
func TestBaseURL(t *testing.T) {
cl, err := NewClient("https://www.offchainlabs.com:3500")
require.NoError(t, err)
require.Equal(t, "www.offchainlabs.com", cl.BaseURL().Hostname())
require.Equal(t, "3500", cl.BaseURL().Port())
}

api/client/errors.go

@@ -0,0 +1,40 @@
package client
import (
"fmt"
"io"
"net/http"
"github.com/pkg/errors"
)
// ErrMalformedHostname is used to indicate if a host name's format is incorrect.
var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code.
// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
var ErrNotOK = errors.New("did not receive 2xx response from API")
// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized.
var ErrInvalidNodeVersion = errors.New("invalid node version response")
// Non200Err parses an HTTP response and returns a formatted error for any response that is not 200.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
case 404:
return errors.Wrap(ErrNotFound, msg)
default:
return errors.Wrap(ErrNotOK, msg)
}
}
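Since Non200Err always wraps one of the sentinels above, callers can classify failures with errors.Is. A sketch under those assumptions (the path and helper name are placeholders):

```go
package main

import (
	"context"
	"errors"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

// classify is a hypothetical helper showing how the sentinel errors
// returned by this package can be told apart.
func classify(ctx context.Context, c *client.Client) string {
	_, err := c.Get(ctx, "/eth/v1/beacon/genesis")
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, client.ErrNotFound):
		return "resource missing (404)"
	case errors.Is(err, client.ErrNotOK):
		return "other non-2xx response"
	default:
		return "transport error: " + err.Error()
	}
}
```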

api/client/options.go

@@ -0,0 +1,48 @@
package client
import (
"fmt"
"net/http"
"time"
)
// ReqOption is a request functional option.
type ReqOption func(*http.Request)
// WithSSZEncoding is a request functional option that adds SSZ encoding header.
func WithSSZEncoding() ReqOption {
return func(req *http.Request) {
req.Header.Set("Accept", "application/octet-stream")
}
}
// WithAuthorizationToken is a request functional option that adds the authorization token header.
func WithAuthorizationToken(token string) ReqOption {
return func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
}
}
// ClientOpt is a functional option for the Client type (http.Client wrapper).
type ClientOpt func(*Client)
// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
func WithTimeout(timeout time.Duration) ClientOpt {
return func(c *Client) {
c.hc.Timeout = timeout
}
}
// WithRoundTripper replaces the underlying HTTP client's transport with a custom one.
func WithRoundTripper(t http.RoundTripper) ClientOpt {
return func(c *Client) {
c.hc.Transport = t
}
}
// WithAuthenticationToken sets an OAuth token to be used.
func WithAuthenticationToken(token string) ClientOpt {
return func(c *Client) {
c.token = token
}
}
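Client options configure the wrapped http.Client once at construction time, while request options decorate individual requests. A composition sketch (host, token, and route are placeholders):

```go
package main

import (
	"context"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client"
)

func main() {
	c, err := client.NewClient(
		"https://example.com:3500",
		client.WithTimeout(10*time.Second),            // applies to every request
		client.WithAuthenticationToken("placeholder"), // stored for later use
	)
	if err != nil {
		panic(err)
	}
	// Per-request options: ask for SSZ and attach the stored token.
	_, _ = c.Get(context.Background(), "/eth/v2/debug/beacon/states/head",
		client.WithSSZEncoding(),
		client.WithAuthorizationToken(c.Token()))
}
```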

View File

@@ -0,0 +1,13 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["client.go"],
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/validator",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,121 @@
package validator
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)
const (
localKeysPath = "/eth/v1/keystores"
remoteKeysPath = "/eth/v1/remotekeys"
feeRecipientPath = "/eth/v1/validator/{pubkey}/feerecipient"
)
// Client provides a collection of helper methods for calling the Keymanager API endpoints.
type Client struct {
*client.Client
}
// NewClient returns a new Client that includes functions for REST calls to keymanager APIs.
func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
c, err := client.NewClient(host, opts...)
if err != nil {
return nil, err
}
return &Client{c}, nil
}
// GetValidatorPubKeys gets the current list of web3signer and local validator public keys in hex format.
func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
jsonlocal, err := c.GetLocalValidatorKeys(ctx)
if err != nil {
return nil, err
}
jsonremote, err := c.GetRemoteValidatorKeys(ctx)
if err != nil {
return nil, err
}
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Keystores) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}
hexKeys := make(map[string]bool)
for index := range jsonlocal.Keystores {
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
}
for index := range jsonremote.Keystores {
hexKeys[jsonremote.Keystores[index].Pubkey] = true
}
keys := make([]string, 0)
for k := range hexKeys {
keys = append(keys, k)
}
return keys, nil
}
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys.
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
return nil, errors.Wrap(err, "failed to parse local keystore list")
}
return jsonlocal, nil
}
// GetRemoteValidatorKeys calls the keymanager APIs for web3signer validator keys.
func (c *Client) GetRemoteValidatorKeys(ctx context.Context) (*apimiddleware.ListRemoteKeysResponseJson, error) {
remoteBytes, err := c.Get(ctx, remoteKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
if !strings.Contains(err.Error(), "Prysm Wallet is not of type Web3Signer") {
return nil, err
}
}
jsonremote := &apimiddleware.ListRemoteKeysResponseJson{}
if len(remoteBytes) != 0 {
if err := json.Unmarshal(remoteBytes, jsonremote); err != nil {
return nil, errors.Wrap(err, "failed to parse remote keystore list")
}
}
return jsonremote, nil
}
// GetFeeRecipientAddresses takes a list of validators in hex format and returns an equal length list of fee recipients in hex format.
func (c *Client) GetFeeRecipientAddresses(ctx context.Context, validators []string) ([]string, error) {
feeRecipients := make([]string, len(validators))
for index, validator := range validators {
feejson, err := c.GetFeeRecipientAddress(ctx, validator)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("keymanager API failed to retrieve fee recipient for validator %s", validators[index]))
}
if feejson.Data == nil {
continue
}
feeRecipients[index] = feejson.Data.Ethaddress
}
return feeRecipients, nil
}
// GetFeeRecipientAddress takes a public key and calls the keymanager API to return its fee recipient.
func (c *Client) GetFeeRecipientAddress(ctx context.Context, pubkey string) (*apimiddleware.GetFeeRecipientByPubkeyResponseJson, error) {
path := strings.Replace(feeRecipientPath, "{pubkey}", pubkey, 1)
b, err := c.Get(ctx, path, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
feejson := &apimiddleware.GetFeeRecipientByPubkeyResponseJson{}
if err := json.Unmarshal(b, feejson); err != nil {
return nil, errors.Wrap(err, "failed to parse fee recipient")
}
return feejson, nil
}
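Put together, a hypothetical caller can enumerate every key the validator client knows about and look up its fee recipient (host, port, and token are placeholders; GetFeeRecipientAddresses preserves the order of its input):

```go
package main

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/api/client"
	"github.com/prysmaticlabs/prysm/v4/api/client/validator"
)

func main() {
	vc, err := validator.NewClient("localhost:7500", // assumed validator API port
		client.WithAuthenticationToken("keymanager-token-placeholder"))
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	keys, err := vc.GetValidatorPubKeys(ctx) // merged local + web3signer keys
	if err != nil {
		panic(err)
	}
	recipients, err := vc.GetFeeRecipientAddresses(ctx, keys)
	if err != nil {
		panic(err)
	}
	for i := range keys {
		fmt.Printf("%s -> %s\n", keys[i], recipients[i])
	}
}
```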

View File

@@ -144,6 +144,7 @@ func (f *Feed) Send(value interface{}) (nsent int) {
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()

View File

@@ -32,6 +32,8 @@ func TestFeedPanics(t *testing.T) {
f.Send(2)
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
// Validate it doesn't deadlock.
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
}
{
var f Feed

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -380,6 +381,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice.
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

View File

@@ -25,8 +25,11 @@ var (
errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)
// An invalid block is the block that fails state transition based on the core protocol rules.

View File

@@ -53,7 +53,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot prim
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
//
// Spec definition:
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
@@ -157,7 +157,11 @@ func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives
if headState == nil || headState.IsNil() {
return nil, errors.New("nil state")
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slot)
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, err
}
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, slot)
if err != nil {
return nil, err
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -15,7 +16,7 @@ import (
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
// Current period
@@ -38,7 +39,7 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
@@ -66,7 +67,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
@@ -81,7 +82,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
c := &Service{}
c := &Service{cfg: &config{BeaconDB: dbTest.SetupDB(t)}}
c.head = &head{state: s}
wanted, err := signing.Domain(s.Fork(), slots.ToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorsRoot())

View File

@@ -53,7 +53,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/async"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -18,7 +19,17 @@ import (
)
// getAttPreState retrieves the att pre state either from the cache or from the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return s.HeadStateReadOnly(ctx)
}
}
}
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
@@ -32,7 +43,36 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
if cachedState != nil && !cachedState.IsNil() {
return cachedState, nil
}
// Try the next slot cache for early epoch calls; this should mostly have been
// covered already, but the check is cheap.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil, errors.Wrap(err, "could not compute epoch start")
}
cachedState = transition.NextSlotState(c.Root, slot)
if cachedState != nil && !cachedState.IsNil() {
if cachedState.Slot() != slot {
cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
if err != nil {
return nil, errors.Wrap(err, "could not process slots")
}
}
if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
}
return cachedState, nil
}
// Do not process attestations for old, non-viable checkpoints.
ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
if err != nil {
return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
}
if !ok {
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
}
// Fallback to state regeneration.
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)

View File

@@ -27,11 +27,20 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
blkWithoutState := util.NewBeaconBlock()
blkWithoutState.Block.Slot = 0
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
BlkWithOutStateRoot, err := blkWithoutState.Block.HashTreeRoot()
cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -42,7 +51,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
blkWithValidState := util.NewBeaconBlock()
blkWithValidState.Block.Slot = 2
blkWithValidState.Block.Slot = 32
util.SaveBlock(t, ctx, beaconDB, blkWithValidState)
blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
@@ -57,6 +66,10 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))
service.head = &head{
state: st,
}
tests := []struct {
name string
a *ethpb.Attestation
@@ -67,11 +80,6 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "no pre state for attestations's target block",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -160,6 +168,9 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
require.NoError(t, err)
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
@@ -167,8 +178,17 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
s2, err := service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s2, err = service.getAttPreState(ctx, cp2)
require.NoError(t, err)
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
s1, err = service.getAttPreState(ctx, cp1)
@@ -187,6 +207,10 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s3, err := service.getAttPreState(ctx, cp3)
require.NoError(t, err)
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
@@ -195,11 +219,18 @@ func TestStore_SaveCheckpointState(t *testing.T) {
func TestStore_UpdateCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
epoch := primitives.Epoch(1)
baseState, _ := util.DeterministicGenesisState(t, 1)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -209,8 +240,16 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
epoch = 2
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
blk = util.NewBeaconBlock()
blk.Block.Slot = 64
r2, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)
@@ -289,3 +328,22 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
}
func TestGetAttPreState_HeadState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
baseState, _ := util.DeterministicGenesisState(t, 1)
epoch := primitives.Epoch(1)
blk := util.NewBeaconBlock()
r1, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
require.NoError(t, transition.UpdateNextSlotCache(ctx, checkpoint.Root, baseState))
_, err = service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
st, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().SlotsPerEpoch, st.Slot())
}

View File

@@ -285,7 +285,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedB
}()
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
@@ -483,14 +483,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, copied.Slot()+1)
if err != nil {
return err
}
@@ -651,26 +651,24 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
// If there is not, it will call forkchoice updated with the correct payload attribute and then cache the payload ID.
func (s *Service) spawnLateBlockTasksLoop() {
go func() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
func (s *Service) runLateBlockTasks() {
_, err := s.clockWaiter.WaitForClock(s.ctx)
if err != nil {
log.WithError(err).Error("runLateBlockTasks encountered an error waiting for initialization")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}()
}
}
// lateBlockTasks is called 4 seconds into the slot and performs tasks
@@ -685,12 +683,26 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
Type: statefeed.MissedSlot,
})
headRoot := s.headRoot()
headState := s.headState(ctx)
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
@@ -698,8 +710,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
@@ -709,11 +719,4 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}

View File

@@ -636,7 +636,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s))
require.NoError(t, service.handleEpochBoundary(ctx, s, []byte{}))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
}
@@ -1875,9 +1875,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committe as st
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}

View File

@@ -26,7 +26,7 @@ const reorgLateBlockCountAttestations = 2 * time.Second
// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error)
}
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
@@ -37,7 +37,7 @@ type AttestationReceiver interface {
}
// AttestationTargetState returns the pre state of attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)
if err != nil {
return nil, err
@@ -45,6 +45,9 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
// We acquire the lock here instead of in getAttPreState because that function gets called from UpdateHead, which holds a write lock.
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.getAttPreState(ctx, target)
}


@@ -130,7 +130,7 @@ func (s *Service) Start() {
}
}
s.spawnProcessAttestationsRoutine()
s.spawnLateBlockTasksLoop()
go s.runLateBlockTasks()
}
// Stop the blockchain service's main event loop and associated goroutines.


@@ -320,7 +320,7 @@ func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestatio
}
// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
return s.State, nil
}


@@ -3,6 +3,7 @@ package builder
import (
"context"
"testing"
"time"
buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
@@ -38,6 +39,21 @@ func Test_RegisterValidator(t *testing.T) {
assert.Equal(t, true, builder.RegisteredVals[pubkey])
}
func Test_RegisterValidator_WithCache(t *testing.T) {
ctx := context.Background()
headFetcher := &blockchainTesting.ChainService{}
builder := buildertesting.NewClient()
s, err := NewService(ctx, WithRegistrationCache(), WithHeadFetcher(headFetcher), WithBuilderClient(&builder))
require.NoError(t, err)
pubkey := bytesutil.ToBytes48([]byte("pubkey"))
var feeRecipient [20]byte
reg := &eth.ValidatorRegistrationV1{Pubkey: pubkey[:], Timestamp: uint64(time.Now().UTC().Unix()), FeeRecipient: feeRecipient[:]}
require.NoError(t, s.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{{Message: reg}}))
registration, err := s.registrationCache.RegistrationByIndex(0)
require.NoError(t, err)
require.DeepEqual(t, reg, registration)
}
func Test_BuilderMethodsWithouClient(t *testing.T) {
s, err := NewService(context.Background())
require.NoError(t, err)


@@ -58,7 +58,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, _ interfaces.
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch {
if slots.ToEpoch(slot) >= params.BeaconConfig().CapellaForkEpoch || s.BidCapella != nil {
return builder.WrappedSignedBuilderBidCapella(s.BidCapella)
}
w, err := builder.WrappedSignedBuilderBid(s.Bid)


@@ -34,6 +34,7 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
@@ -87,7 +88,6 @@ go_test(
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)


@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 4))
// Mimick finalized deposit trie fetch.
// Mimic finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1


@@ -4,35 +4,41 @@ import (
"bytes"
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)
const keyLength = 40
const vIdLength = 8
const pIdLength = 8
const vpIdsLength = vIdLength + pIdLength
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
// The key is the concatenation of the slot and the block root.
// The value is the concatenation of the proposer and payload IDs, 8 bytes each.
type ProposerPayloadIDsCache struct {
slotToProposerAndPayloadIDs map[[40]byte][vpIdsLength]byte
slotToProposerAndPayloadIDs map[[keyLength]byte][vpIdsLength]byte
sync.RWMutex
}
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
return &ProposerPayloadIDsCache{
slotToProposerAndPayloadIDs: make(map[[40]byte][vpIdsLength]byte),
slotToProposerAndPayloadIDs: make(map[[keyLength]byte][vpIdsLength]byte),
}
}
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r [32]byte) (primitives.ValidatorIndex, [8]byte, bool) {
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot and head root to build the block.
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(
slot primitives.Slot,
r [fieldparams.RootLength]byte,
) (primitives.ValidatorIndex, [pIdLength]byte, bool) {
f.RLock()
defer f.RUnlock()
ids, ok := f.slotToProposerAndPayloadIDs[idKey(slot, r)]
if !ok {
return 0, [8]byte{}, false
return 0, [pIdLength]byte{}, false
}
vId := ids[:vIdLength]
@@ -43,8 +49,13 @@ func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot primitives.Slot, r
return primitives.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
}
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot, vId primitives.ValidatorIndex, pId [8]byte, r [32]byte) {
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot and head root to build block.
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(
slot primitives.Slot,
vId primitives.ValidatorIndex,
pId [pIdLength]byte,
r [fieldparams.RootLength]byte,
) {
f.Lock()
defer f.Unlock()
var vIdBytes [vIdLength]byte
@@ -63,7 +74,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot primitives.Slot,
}
}
// PrunePayloadIDs removes the payload id entries that's current than input slot.
// PrunePayloadIDs removes the payload ID entries older than input slot.
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
f.Lock()
defer f.Unlock()
@@ -76,8 +87,8 @@ func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot primitives.Slot) {
}
}
func idKey(slot primitives.Slot, r [32]byte) [40]byte {
var k [40]byte
func idKey(slot primitives.Slot, r [fieldparams.RootLength]byte) [keyLength]byte {
var k [keyLength]byte
copy(k[:], append(bytesutil.Uint64ToBytesBigEndian(uint64(slot)), r[:]...))
return k
}
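
For reference, a minimal stand-alone sketch of the 40-byte key layout used above — an 8-byte big-endian slot followed by the 32-byte head root. This restates the diff using only standard-library calls:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const keyLength = 40 // 8-byte slot ++ 32-byte block root

// idKey restates the cache key construction from the diff: the big-endian
// slot number followed by the head block root.
func idKey(slot uint64, root [32]byte) (k [keyLength]byte) {
	binary.BigEndian.PutUint64(k[:8], slot)
	copy(k[8:], root[:])
	return k
}

func main() {
	k := idKey(7, [32]byte{'a'})
	fmt.Printf("%x\n", k[:9]) // 000000000000000761 — slot 7, then the first root byte
}
```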


@@ -3,15 +3,11 @@ package cache
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -38,33 +34,10 @@ func (regCache *RegistrationCache) RegistrationByIndex(id primitives.ValidatorIn
regCache.lock.RUnlock()
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
isExpired, err := RegistrationTimeStampExpired(v.Timestamp)
if err != nil {
return nil, errors.Wrapf(err, "failed to check registration expiration")
}
if isExpired {
regCache.lock.RUnlock()
regCache.lock.Lock()
defer regCache.lock.Unlock()
delete(regCache.indexToRegistration, id)
log.Warnf("registration for validator index %d expired at unix time %d", id, v.Timestamp)
return nil, errors.Wrapf(ErrNotFoundRegistration, "validator id %d", id)
}
regCache.lock.RUnlock()
return v, nil
}
func RegistrationTimeStampExpired(ts uint64) (bool, error) {
// safely convert unint64 to int64
i, err := math.Int(ts)
if err != nil {
return false, err
}
expiryDuration := params.BeaconConfig().RegistrationDuration
// registered time + expiration duration < current time = expired
return time.Unix(int64(i), 0).Add(expiryDuration).Before(time.Now()), nil
}
// UpdateIndexToRegisteredMap adds or updates values in the cache based on the argument.
func (regCache *RegistrationCache) UpdateIndexToRegisteredMap(ctx context.Context, m map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1) {
_, span := trace.StartSpan(ctx, "RegistrationCache.UpdateIndexToRegisteredMap")


@@ -6,15 +6,12 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestRegistrationCache(t *testing.T) {
hook := logTest.NewGlobal()
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex := primitives.ValidatorIndex(1)
@@ -31,29 +28,14 @@ func TestRegistrationCache(t *testing.T) {
reg, err := cache.RegistrationByIndex(validatorIndex)
require.NoError(t, err)
require.Equal(t, string(reg.Pubkey), string(pubkey))
t.Run("Registration expired", func(t *testing.T) {
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
_, err := cache.RegistrationByIndex(validatorIndex2)
require.ErrorContains(t, "no validator registered", err)
require.LogsContain(t, hook, "expired")
})
t.Run("Registration close to expiration still passes", func(t *testing.T) {
t.Run("successfully updates", func(t *testing.T) {
pubkey, err := hexutil.Decode("0x88247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
require.NoError(t, err)
validatorIndex2 := primitives.ValidatorIndex(2)
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs - 5 seconds
m[validatorIndex2] = &ethpb.ValidatorRegistrationV1{
FeeRecipient: []byte{},
GasLimit: 100,
Timestamp: uint64(time.Now().Add(-1 * overExpirationPadTime).Unix()),
Timestamp: uint64(time.Now().Unix()),
Pubkey: pubkey,
}
cache.UpdateIndexToRegisteredMap(context.Background(), m)
@@ -62,21 +44,3 @@ func TestRegistrationCache(t *testing.T) {
require.Equal(t, string(reg.Pubkey), string(pubkey))
})
}
func Test_RegistrationTimeStampExpired(t *testing.T) {
// expiration set at 3 epochs
t.Run("expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*4) // 4 epochs
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, true, isExpired)
})
t.Run("is not expired registration", func(t *testing.T) {
overExpirationPadTime := time.Second * time.Duration((params.BeaconConfig().SecondsPerSlot*uint64(params.BeaconConfig().SlotsPerEpoch)*3)-5) // 3 epochs -5 seconds
ts := uint64(time.Now().Add(-1 * overExpirationPadTime).Unix())
isExpired, err := RegistrationTimeStampExpired(ts)
require.NoError(t, err)
require.Equal(t, false, isExpired)
})
}


@@ -2,6 +2,7 @@ package altair
import (
"context"
goErrors "errors"
"fmt"
"time"
@@ -22,6 +23,10 @@ import (
const maxRandomByte = uint64(1<<8 - 1)
var (
ErrTooLate = errors.New("sync message is too late")
)
// ValidateNilSyncContribution validates the following fields are not nil:
// -the contribution and proof itself
// -the message within contribution and proof
@@ -217,7 +222,7 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
upperBound := time.Now().Add(clockDisparity)
// Verify sync message slot is within the time range.
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
return fmt.Errorf(
syncErr := fmt.Errorf(
"sync message time %v (slot %d) not within allowable range of %v (slot %d) to %v (slot %d)",
messageTime,
slot,
@@ -226,6 +231,11 @@ func ValidateSyncMessageTime(slot primitives.Slot, genesisTime time.Time, clockD
upperBound,
uint64(upperBound.Unix()-genesisTime.Unix())/params.BeaconConfig().SecondsPerSlot,
)
// Wrap error message if sync message is too late.
if messageTime.Before(lowerBound) {
syncErr = goErrors.Join(ErrTooLate, syncErr)
}
return syncErr
}
return nil
}
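
The errors.Join pattern above lets callers detect the late-message case without string matching; a minimal runnable sketch, with a local stand-in for the package-level ErrTooLate sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// Local stand-in for the package's ErrTooLate sentinel.
var ErrTooLate = errors.New("sync message is too late")

func validate(late bool) error {
	err := fmt.Errorf("sync message time not within allowable range")
	if late {
		// Joining keeps both the sentinel and the detailed message, so
		// errors.Is(err, ErrTooLate) still matches downstream.
		return errors.Join(ErrTooLate, err)
	}
	return err
}

func main() {
	if err := validate(true); errors.Is(err, ErrTooLate) {
		fmt.Println("downgrading log for late message:", err)
	}
}
```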


@@ -61,7 +61,7 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedGetter):
case errors.Is(err, consensus_types.ErrUnsupportedField):
return false, nil
case err != nil:
return false, err


@@ -68,7 +68,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",


@@ -8,13 +8,16 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
var (
ErrTooLate = errors.New("attestation is too late")
)
// ValidateNilAttestation checks if any composite field of the input attestation is nil.
// Access to these nil fields will result in a runtime panic;
// it is recommended to run these checks as a first line of defense.
@@ -66,25 +69,6 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
//
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
for i := 0; i < len(sigs); i++ {
sigs[i], err = bls.SignatureFromBytes(attestations[i].Signature)
if err != nil {
return nil, err
}
}
return bls.AggregateSignatures(sigs), nil
}
// IsAggregated returns true if the attestation is an aggregated attestation,
// false otherwise.
func IsAggregated(attestation *ethpb.Attestation) bool {
@@ -183,11 +167,11 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooEarlyCount.Inc()
return attError
attReceivedTooLateCount.Inc()
return errors.Join(ErrTooLate, attError)
}
if attTime.After(upperBounds) {
attReceivedTooLateCount.Inc()
attReceivedTooEarlyCount.Inc()
return attError
}
return nil


@@ -10,8 +10,6 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -45,44 +43,6 @@ func TestAttestation_IsAggregator(t *testing.T) {
})
}
func TestAttestation_AggregateSignature(t *testing.T) {
t.Run("verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := bytesutil.ToBytes32([]byte("hello"))
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg[:])
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts)
require.NoError(t, err)
assert.Equal(t, true, aggSig.FastAggregateVerify(pubkeys, msg), "Signature did not verify")
})
t.Run("not verified", func(t *testing.T) {
pubkeys := make([]bls.PublicKey, 0, 100)
atts := make([]*ethpb.Attestation, 0, 100)
msg := []byte("hello")
for i := 0; i < 100; i++ {
priv, err := bls.RandKey()
require.NoError(t, err)
pub := priv.PublicKey()
sig := priv.Sign(msg)
pubkeys = append(pubkeys, pub)
att := &ethpb.Attestation{Signature: sig.Marshal()}
atts = append(atts, att)
}
aggSig, err := helpers.AggregateSignature(atts[0 : len(atts)-2])
require.NoError(t, err)
assert.Equal(t, false, aggSig.FastAggregateVerify(pubkeys, bytesutil.ToBytes32(msg)), "Signature not suppose to verify")
})
}
func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
// Create 10 committees
committeeCount := uint64(10)


@@ -150,7 +150,7 @@ func TestSlashValidator_OK(t *testing.T) {
maxBalance := params.BeaconConfig().MaxEffectiveBalance
slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isnt the expected amount")
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")
whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
bal, err := state.BalanceAtIndex(proposer)


@@ -543,7 +543,7 @@ func TestStore_Blocks_Retrieve_SlotRangeWithStep(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 150, len(retrieved))
for _, b := range retrieved {
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpect block slot %d", b.Block().Slot())
assert.Equal(t, primitives.Slot(0), (b.Block().Slot()-100)%step, "Unexpected block slot %d", b.Block().Slot())
}
})
}

View File

@@ -130,9 +130,15 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
return err
}
item := enc
if hasAltairKey(item) {
switch {
case hasAltairKey(enc):
item = item[len(altairKey):]
case hasBellatrixKey(enc):
item = item[len(bellatrixKey):]
case hasCapellaKey(enc):
item = item[len(capellaKey):]
}
detector, err := detect.FromState(item)
if err != nil {
return err
@@ -165,9 +171,14 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
return err
}
var stateBytes []byte
if hasAltairKey(enc) {
switch {
case hasAltairKey(enc):
stateBytes = snappy.Encode(nil, append(altairKey, rawObj...))
} else {
case hasBellatrixKey(enc):
stateBytes = snappy.Encode(nil, append(bellatrixKey, rawObj...))
case hasCapellaKey(enc):
stateBytes = snappy.Encode(nil, append(capellaKey, rawObj...))
default:
stateBytes = snappy.Encode(nil, rawObj)
}
if stateErr := stateBkt.Put(keys[index], stateBytes); stateErr != nil {

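A toy restatement of the prefix dispatch above, with illustrative prefix values — the real altairKey/bellatrixKey/capellaKey constants live in the kv package and are not reproduced here:

```go
package main

import (
	"bytes"
	"fmt"
)

// Illustrative stand-ins; the actual key values are defined elsewhere in the
// kv package.
var (
	altairKey    = []byte("altair")
	bellatrixKey = []byte("bellatrix")
	capellaKey   = []byte("capella")
)

// stripForkPrefix mirrors the switch above: detect which fork key, if any,
// prefixes the encoded state and strip it before decoding.
func stripForkPrefix(enc []byte) []byte {
	switch {
	case bytes.HasPrefix(enc, altairKey):
		return enc[len(altairKey):]
	case bytes.HasPrefix(enc, bellatrixKey):
		return enc[len(bellatrixKey):]
	case bytes.HasPrefix(enc, capellaKey):
		return enc[len(capellaKey):]
	}
	return enc
}

func main() {
	fmt.Println(string(stripForkPrefix([]byte("capella<ssz-bytes>")))) // <ssz-bytes>
}
```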

@@ -313,3 +313,217 @@ func Test_migrateAltairStateValidators(t *testing.T) {
})
}
}
func Test_migrateBellatrixStateValidators(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
}{
{
name: "migrates validators and adds them to new buckets",
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
assert.NoError(t, err)
_, err = tx.CreateBucketIfNotExists(blockRootValidatorHashesBucket)
assert.NoError(t, err)
return nil
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// check whether the new buckets are present
err := dbStore.db.View(func(tx *bbolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
assert.NotNil(t, valBkt)
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
assert.NotNil(t, idxBkt)
return nil
})
assert.NoError(t, err)
// check if the migration worked
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
// find hashes of the validators that are set as part of the state
var hashes []byte
var individualHashes [][]byte
for _, val := range vals {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
hashes = append(hashes, hash[:]...)
individualHashes = append(individualHashes, hash[:])
}
// check if all the validators that were in the state, are stored properly in the validator bucket
pbState, err := state_native.ProtobufBeaconStateBellatrix(rcvdState.ToProtoUnsafe())
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
found := false
for _, h := range individualHashes {
if bytes.Equal(hash[:], h) {
found = true
}
}
require.Equal(t, true, found)
validatorsFoundCount++
}
require.Equal(t, len(vals), validatorsFoundCount)
// check if the state validator indexes are stored properly
err = dbStore.db.View(func(tx *bbolt.Tx) error {
rcvdValhashBytes := tx.Bucket(blockRootValidatorHashesBucket).Get(blockRoot[:])
rcvdValHashes, sErr := snappy.Decode(nil, rcvdValhashBytes)
assert.NoError(t, sErr)
require.DeepEqual(t, hashes, rcvdValHashes)
return nil
})
assert.NoError(t, err)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dbStore := setupDB(t)
// add a state with the given validators
vals := validators(10)
blockRoot := [32]byte{'A'}
st, _ := util.DeterministicGenesisStateBellatrix(t, 20)
err := st.SetFork(&v1alpha1.Fork{
PreviousVersion: params.BeaconConfig().AltairForkVersion,
CurrentVersion: params.BeaconConfig().BellatrixForkVersion,
Epoch: 0,
})
require.NoError(t, err)
assert.NoError(t, st.SetSlot(100))
assert.NoError(t, st.SetValidators(vals))
assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot))
// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()
tt.setup(t, dbStore, st, vals)
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateStateValidators(tx) error")
tt.eval(t, dbStore, st, vals)
})
}
}
func Test_migrateCapellaStateValidators(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
eval func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator)
}{
{
name: "migrates validators and adds them to new buckets",
setup: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// create some new buckets that should be present for this migration
err := dbStore.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(stateValidatorsBucket)
assert.NoError(t, err)
_, err = tx.CreateBucketIfNotExists(blockRootValidatorHashesBucket)
assert.NoError(t, err)
return nil
})
assert.NoError(t, err)
},
eval: func(t *testing.T, dbStore *Store, state state.BeaconState, vals []*v1alpha1.Validator) {
// check whether the new buckets are present
err := dbStore.db.View(func(tx *bbolt.Tx) error {
valBkt := tx.Bucket(stateValidatorsBucket)
assert.NotNil(t, valBkt)
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
assert.NotNil(t, idxBkt)
return nil
})
assert.NoError(t, err)
// check if the migration worked
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
// find hashes of the validators that are set as part of the state
var hashes []byte
var individualHashes [][]byte
for _, val := range vals {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
hashes = append(hashes, hash[:]...)
individualHashes = append(individualHashes, hash[:])
}
// check if all the validators that were in the state, are stored properly in the validator bucket
pbState, err := state_native.ProtobufBeaconStateCapella(rcvdState.ToProtoUnsafe())
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
hash, hashErr := val.HashTreeRoot()
assert.NoError(t, hashErr)
found := false
for _, h := range individualHashes {
if bytes.Equal(hash[:], h) {
found = true
}
}
require.Equal(t, true, found)
validatorsFoundCount++
}
require.Equal(t, len(vals), validatorsFoundCount)
// check if the state validator indexes are stored properly
err = dbStore.db.View(func(tx *bbolt.Tx) error {
rcvdValhashBytes := tx.Bucket(blockRootValidatorHashesBucket).Get(blockRoot[:])
rcvdValHashes, sErr := snappy.Decode(nil, rcvdValhashBytes)
assert.NoError(t, sErr)
require.DeepEqual(t, hashes, rcvdValHashes)
return nil
})
assert.NoError(t, err)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dbStore := setupDB(t)
// add a state with the given validators
vals := validators(10)
blockRoot := [32]byte{'A'}
st, _ := util.DeterministicGenesisStateCapella(t, 20)
err := st.SetFork(&v1alpha1.Fork{
PreviousVersion: params.BeaconConfig().BellatrixForkVersion,
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
Epoch: 0,
})
require.NoError(t, err)
assert.NoError(t, st.SetSlot(100))
assert.NoError(t, st.SetValidators(vals))
assert.NoError(t, dbStore.SaveState(context.Background(), st, blockRoot))
// enable historical state representation flag to test this
resetCfg := features.InitWithReset(&features.Flags{
EnableHistoricalSpaceRepresentation: true,
})
defer resetCfg()
tt.setup(t, dbStore, st, vals)
assert.NoError(t, migrateStateValidators(context.Background(), dbStore.db), "migrateStateValidators(tx) error")
tt.eval(t, dbStore, st, vals)
})
}
}


@@ -14,7 +14,6 @@ go_library(
"metrics.go",
"options.go",
"prometheus.go",
"provider.go",
"rpc_connection.go",
"service.go",
],
@@ -90,7 +89,6 @@ go_test(
"init_test.go",
"log_processing_test.go",
"prometheus_test.go",
"provider_test.go",
"service_test.go",
],
data = glob(["testdata/**"]),
@@ -122,7 +120,6 @@ go_test(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/clientstats:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",


@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)
@@ -15,7 +16,7 @@ type Option func(s *Service) error
// WithHttpEndpoint parses the http endpoint for the powchain service to use.
func WithHttpEndpoint(endpointString string) Option {
return func(s *Service) error {
s.cfg.currHttpEndpoint = HttpEndpoint(endpointString)
s.cfg.currHttpEndpoint = network.HttpEndpoint(endpointString)
return nil
}
}
@@ -27,7 +28,7 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
return nil
}
// Overwrite authorization type for all endpoints to be of a bearer type.
hEndpoint := HttpEndpoint(endpointString)
hEndpoint := network.HttpEndpoint(endpointString)
hEndpoint.Auth.Method = authorization.Bearer
hEndpoint.Auth.Value = string(secret)


@@ -15,7 +15,7 @@ import (
func TestCleanup(t *testing.T) {
ctx := context.Background()
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
unregistered := pc.unregister()
assert.Equal(t, true, unregistered, "PowchainCollector.unregister did not return true (via prometheus.DefaultRegistry)")
// PowchainCollector is a prometheus.Collector, so we should be able to register it again
@@ -39,7 +39,7 @@ func TestCleanup(t *testing.T) {
func TestCancelation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Uxpected error caling NewPowchainCollector")
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
ticker := time.NewTicker(10 * time.Second)
cancel()
select {


@@ -1,49 +0,0 @@
package execution
import (
"encoding/base64"
"strings"
"github.com/prysmaticlabs/prysm/v4/network"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
)
// HttpEndpoint extracts an httputils.Endpoint from the provider parameter.
func HttpEndpoint(eth1Provider string) network.Endpoint {
endpoint := network.Endpoint{
Url: "",
Auth: network.AuthorizationData{
Method: authorization.None,
Value: "",
}}
authValues := strings.Split(eth1Provider, ",")
endpoint.Url = strings.TrimSpace(authValues[0])
if len(authValues) > 2 {
log.Errorf(
"ETH1 endpoint string can contain one comma for specifying the authorization header to access the provider."+
" String contains too many commas: %d. Skipping authorization.", len(authValues)-1)
} else if len(authValues) == 2 {
switch network.Method(strings.TrimSpace(authValues[1])) {
case authorization.Basic:
basicAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(basicAuthValues) != 2 {
log.Errorf("Basic Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Basic
endpoint.Auth.Value = base64.StdEncoding.EncodeToString([]byte(basicAuthValues[1]))
}
case authorization.Bearer:
bearerAuthValues := strings.Split(strings.TrimSpace(authValues[1]), " ")
if len(bearerAuthValues) != 2 {
log.Errorf("Bearer Authentication has incorrect format. Skipping authorization.")
} else {
endpoint.Auth.Method = authorization.Bearer
endpoint.Auth.Value = bearerAuthValues[1]
}
case authorization.None:
log.Errorf("Authorization has incorrect format or authorization type is not supported.")
}
}
return endpoint
}


@@ -1,74 +0,0 @@
package execution
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/network/authorization"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestHttpEndpoint(t *testing.T) {
hook := logTest.NewGlobal()
url := "http://test"
t.Run("URL", func(t *testing.T) {
endpoint := HttpEndpoint(url)
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with separator", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("URL with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(" " + url + " ,")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
})
t.Run("Basic auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Basic username:password")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Basic username:password ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Basic, endpoint.Auth.Method)
assert.Equal(t, "dXNlcm5hbWU6cGFzc3dvcmQ=", endpoint.Auth.Value)
})
t.Run("Basic auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Basic username:password foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Bearer auth", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with whitespace", func(t *testing.T) {
endpoint := HttpEndpoint(url + ", Bearer token ")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.Bearer, endpoint.Auth.Method)
assert.Equal(t, "token", endpoint.Auth.Value)
})
t.Run("Bearer auth with incorrect format", func(t *testing.T) {
hook.Reset()
endpoint := HttpEndpoint(url + ",Bearer token foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
t.Run("Too many separators", func(t *testing.T) {
endpoint := HttpEndpoint(url + ",Bearer token,foo")
assert.Equal(t, url, endpoint.Url)
assert.Equal(t, authorization.None, endpoint.Auth.Method)
assert.LogsContain(t, hook, "Skipping authorization")
})
}


@@ -3,7 +3,6 @@ package execution
import (
"context"
"fmt"
"net/url"
"strings"
"time"
@@ -107,26 +106,10 @@ func (s *Service) retryExecutionClientConnection(ctx context.Context, err error)
// Initializes an RPC connection with authentication headers.
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
// Need to handle ipc and http
var client *gethRPC.Client
u, err := url.Parse(endpoint.Url)
client, err := network.NewExecutionRPCClient(ctx, endpoint)
if err != nil {
return nil, err
}
switch u.Scheme {
case "http", "https":
client, err = gethRPC.DialOptions(ctx, endpoint.Url, gethRPC.WithHTTPClient(endpoint.HttpClient()))
if err != nil {
return nil, err
}
case "", "ipc":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
}
if endpoint.Auth.Method != authorization.None {
header, err := endpoint.Auth.ToHeaderValue()
if err != nil {

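For context, a sketch of the scheme dispatch that this hunk factors out into network.NewExecutionRPCClient (the helper's signature is assumed from the call site; the go-ethereum dial functions below are real):

```go
package network

import (
	"context"
	"fmt"
	"net/url"

	gethRPC "github.com/ethereum/go-ethereum/rpc"
)

// dialExecution shows the http/https vs. ipc dispatch previously inlined in
// newRPCClientWithAuth; auth headers are handled separately by the caller.
func dialExecution(ctx context.Context, rawURL string) (*gethRPC.Client, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}
	switch u.Scheme {
	case "http", "https":
		return gethRPC.DialContext(ctx, rawURL)
	case "", "ipc":
		return gethRPC.DialIPC(ctx, rawURL)
	default:
		return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
	}
}
```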

@@ -228,10 +228,44 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
return n.root, nil
}
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
node, ok := f.store.nodeByRoot[cp.Root]
if !ok || node == nil {
return false, nil
}
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
}
if node.slot > epochStart {
return false, nil
}
if len(node.children) == 0 {
return true, nil
}
if node.slot == epochStart {
return true, nil
}
nodeEpoch := slots.ToEpoch(node.slot)
if nodeEpoch >= cp.Epoch {
return false, nil
}
for _, child := range node.children {
if child.slot > epochStart {
return true, nil
}
}
return false, nil
}
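
A simplified stand-alone restatement of the viability rule, useful for checking the boundary cases by hand. The flat child-slot slice and the fixed 32-slot epoch are assumptions of this sketch, not the real forkchoice node type:

```go
package main

import "fmt"

const slotsPerEpoch = 32

// viableForCheckpoint condenses the rule above: the block must sit at or
// before the epoch boundary, and either end its chain, sit exactly on the
// boundary, or be the last block before a child that crosses it.
func viableForCheckpoint(nodeSlot uint64, childSlots []uint64, cpEpoch uint64) bool {
	epochStart := cpEpoch * slotsPerEpoch
	if nodeSlot > epochStart {
		return false // block is after the boundary
	}
	if len(childSlots) == 0 || nodeSlot == epochStart {
		return true // open-ended chain, or the boundary block itself
	}
	if nodeSlot/slotsPerEpoch >= cpEpoch {
		return false
	}
	for _, s := range childSlots {
		if s > epochStart {
			return true // node is the last block before the boundary
		}
	}
	return false
}

func main() {
	// A slot-1 block with a child at slot 33 straddles the epoch-1 boundary.
	fmt.Println(viableForCheckpoint(1, []uint64{33}, 1)) // true
	// The same block with only a slot-2 child does not.
	fmt.Println(viableForCheckpoint(1, []uint64{2}, 1)) // false
}
```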
// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes.
func (f *ForkChoice) updateBalances() error {
newBalances := f.justifiedBalances
zHash := params.BeaconConfig().ZeroHash
for index, vote := range f.votes {
// Skip if validator has been slashed
@@ -240,7 +274,7 @@ func (f *ForkChoice) updateBalances() error {
}
// Skip if validator has never voted for current root and next root (i.e. if the
// votes are zero hash aka genesis block), there's nothing to compute.
if vote.currentRoot == params.BeaconConfig().ZeroHash && vote.nextRoot == params.BeaconConfig().ZeroHash {
if vote.currentRoot == zHash && vote.nextRoot == zHash {
continue
}
@@ -260,7 +294,7 @@ func (f *ForkChoice) updateBalances() error {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != params.BeaconConfig().ZeroHash {
if ok && vote.nextRoot != zHash {
// Protection against nil node
if nextNode == nil {
return errors.Wrap(ErrNilNode, "could not update balances")
@@ -269,7 +303,7 @@ func (f *ForkChoice) updateBalances() error {
}
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != params.BeaconConfig().ZeroHash {
if ok && vote.currentRoot != zHash {
// Protection against nil node
if currentNode == nil {
return errors.Wrap(ErrNilNode, "could not update balances")
@@ -594,3 +628,12 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
return nil
}
// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.slot, nil
}


@@ -754,3 +754,110 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
got := f.UnrealizedJustifiedPayloadBlockHash()
require.Equal(t, [32]byte{'A'}, got)
}
func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children
require.NoError(t, f.InsertNode(ctx, st, root))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 0})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 2})
require.NoError(t, err)
require.Equal(t, true, viable)
st, broot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, broot))
// Epoch start
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// No Children but impossible checkpoint
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
st, croot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, broot, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, croot))
// Children in same epoch
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
st, droot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, broot, [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, droot))
// Children in next epoch but boundary
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// Boundary block
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 0})
require.NoError(t, err)
require.Equal(t, false, viable)
// Children in next epoch
st, eroot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, broot, [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, eroot))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
}
func TestForkChoiceSlot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
_, err = f.Slot(root)
require.ErrorIs(t, ErrNilNode, err)
require.NoError(t, f.InsertNode(ctx, st, root))
slot, err := f.Slot(root)
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}


@@ -31,9 +31,7 @@ import (
// store.justified_checkpoint = store.best_justified_checkpoint
func (f *ForkChoice) NewSlot(ctx context.Context, slot primitives.Slot) error {
// Reset proposer boost root
if err := f.resetBoostedProposerRoot(ctx); err != nil {
return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
}
f.store.proposerBoostRoot = [32]byte{}
// Return if it's not a new epoch.
if !slots.IsEpochStart(slot) {


@@ -1,19 +1,12 @@
package doublylinkedtree
import (
"context"
"fmt"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
)
// resetBoostedProposerRoot sets the value of the proposer boosted root to zeros.
func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
f.store.proposerBoostRoot = [32]byte{}
return nil
}
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes.
func (f *ForkChoice) applyProposerBoostScore() error {


@@ -82,6 +82,11 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
return
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
}
return true
}
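
The two weight checks combine into a simple guard: only orphan a weak head whose parent carries a strong LMD vote. A numeric sketch — the threshold percentages here are illustrative stand-ins for the ReorgWeightThreshold and ReorgParentWeightThreshold config values:

```go
package main

import "fmt"

const (
	reorgWeightThreshold       = 20  // illustrative: head is "weak" below 20% of committee weight
	reorgParentWeightThreshold = 160 // illustrative: parent is "strong" above 160%
)

func shouldOverride(headWeight, parentWeight, committeeWeight uint64) bool {
	if headWeight*100 > committeeWeight*reorgWeightThreshold {
		return false // head gathered real support, keep it
	}
	if parentWeight*100 < committeeWeight*reorgParentWeightThreshold {
		return false // parent LMD vote is weak, reorging is unsafe
	}
	return true
}

func main() {
	fmt.Println(shouldOverride(5, 170, 100)) // true: weak head under a strong parent
	fmt.Println(shouldOverride(5, 100, 100)) // false: parent vote not strong enough
}
```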
@@ -137,6 +142,11 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
return head.root
}
// Only orphan a block if the parent LMD vote is strong
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return head.root
}
// Only reorg if we are proposing early
secs, err := slots.SecondsSinceSlotStart(head.slot+1, f.store.genesisTime, uint64(time.Now().Unix()))
if err != nil {


@@ -22,7 +22,11 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
@@ -80,6 +84,12 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent = saved
})
t.Run("parent is weak", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.weight = saved
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight
require.Equal(t, false, f.ShouldOverrideFCU())
@@ -101,7 +111,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
driftGenesisTime(f, 3, 1)
childRoot := [32]byte{'b'}
@@ -161,6 +175,12 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
require.Equal(t, childRoot, f.GetProposerHead())
f.store.headNode.parent = saved
})
t.Run("parent is weak", func(t *testing.T) {
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, childRoot, f.GetProposerHead())
f.store.headNode.parent.weight = saved
})
t.Run("Head is strong", func(t *testing.T) {
f.store.headNode.weight = f.store.committeeWeight
require.Equal(t, childRoot, f.GetProposerHead())


@@ -53,6 +53,7 @@ type Getter interface {
CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, primitives.Slot, error)
IsCanonical(root [32]byte) bool
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
FinalizedPayloadBlockHash() [32]byte
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
@@ -66,6 +67,7 @@ type Getter interface {
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)
ShouldOverrideFCU() bool
Slot([32]byte) (primitives.Slot, error)
}
// Setter allows to set forkchoice information


@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/sirupsen/logrus"
)
@@ -29,6 +30,9 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.R
if blk == nil || blk.Body() == nil {
return
}
if blk.Version() == version.Phase0 {
return
}
bits, err := blk.Body().SyncAggregate()
if err != nil {
log.WithError(err).Error("Could not get SyncAggregate")


@@ -114,19 +114,11 @@ func (s *Service) Start() {
"ValidatorIndices": tracked,
}).Info("Starting service")
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
go s.run(stateChannel, stateSub)
go s.run()
}
// run waits until the beacon is synced and starts the monitoring system.
func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) {
if stateChannel == nil {
log.Error("State state is nil")
return
}
func (s *Service) run() {
if err := s.waitForSync(s.config.InitialSyncComplete); err != nil {
log.WithError(err)
return
@@ -154,6 +146,8 @@ func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription
s.isLogging = true
s.Unlock()
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
s.monitorRoutine(stateChannel, stateSub)
}


@@ -271,11 +271,9 @@ func TestWaitForSyncCanceled(t *testing.T) {
func TestRun(t *testing.T) {
hook := logTest.NewGlobal()
s := setupService(t)
stateChannel := make(chan *feed.Event, 1)
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
go func() {
s.run(stateChannel, stateSub)
s.run()
}()
close(s.config.InitialSyncComplete)


@@ -581,7 +581,8 @@ func (b *BeaconNode) fetchBuilderService() *builder.Service {
func (b *BeaconNode) registerAttestationPool() error {
s, err := attestations.NewService(b.ctx, &attestations.Config{
Pool: b.attestationPool,
Pool: b.attestationPool,
InitialSyncComplete: b.initialSyncComplete,
})
if err != nil {
return errors.Wrap(err, "could not register atts pool service")
@@ -742,6 +743,7 @@ func (b *BeaconNode) registerSlasherService() error {
SlashingPoolInserter: b.slashingsPool,
SyncChecker: syncService,
HeadStateFetcher: chainService,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err


@@ -18,6 +18,7 @@ go_library(
deps = [
"//beacon-chain/operations/attestations/kv:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",


@@ -53,27 +53,25 @@ func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unagg
// Track the unaggregated attestations that aren't able to aggregate.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
for _, atts := range attsByDataRoot {
aggregatedAtts := make([]*ethpb.Attestation, 0, len(atts))
processedAtts, err := attaggregation.Aggregate(atts)
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
if err != nil {
return err
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
for _, att := range processedAtts {
if helpers.IsAggregated(att) {
aggregatedAtts = append(aggregatedAtts, att)
} else {
h, err := hashFn(att)
if err != nil {
return err
}
leftOverUnaggregatedAtt[h] = true
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
}
if err := c.SaveAggregatedAttestations(aggregatedAtts); err != nil {
return err
} else {
h, err := hashFn(aggregated)
if err != nil {
return err
}
leftOverUnaggregatedAtt[h] = true
}
}
// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {
h, err := hashFn(att)
@@ -87,7 +85,6 @@ func (c *AttCaches) aggregateUnaggregatedAttestations(ctx context.Context, unagg
return err
}
}
return nil
}
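
A toy illustration of what AggregateDisjointOneBitAtts does to the aggregation bits — OR-ing together non-overlapping single-bit bitlists. Signature aggregation is omitted; only the go-bitfield calls are real:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/go-bitfield"
)

// mergeOneBitAtts ORs disjoint single-bit bitlists into one aggregate bitlist.
func mergeOneBitAtts(bits []bitfield.Bitlist) bitfield.Bitlist {
	merged := bitfield.NewBitlist(bits[0].Len())
	for _, b := range bits {
		for _, idx := range b.BitIndices() {
			merged.SetBitAt(uint64(idx), true)
		}
	}
	return merged
}

func main() {
	a := bitfield.NewBitlist(8)
	a.SetBitAt(1, true)
	b := bitfield.NewBitlist(8)
	b.SetBitAt(5, true)
	fmt.Println(mergeOneBitAtts([]bitfield.Bitlist{a, b}).BitIndices()) // [1 5]
}
```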


@@ -30,6 +30,20 @@ var (
Name: "expired_block_atts_total",
Help: "The number of expired and deleted block attestations in the pool.",
})
batchForkChoiceAttsT1 = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "aggregate_attestations_t1",
Help: "Captures times of attestation aggregation in milliseconds during the first interval per slot",
Buckets: []float64{100, 200, 500, 1000, 1500, 2000, 2500, 3500},
},
)
batchForkChoiceAttsT2 = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "aggregate_attestations_t2",
Help: "Captures times of attestation aggregation in milliseconds during the second interval per slot",
Buckets: []float64{10, 40, 100, 200, 600},
},
)
)
func (s *Service) updateMetrics() {


@@ -7,6 +7,8 @@ import (
"time"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -14,20 +16,34 @@ import (
"go.opencensus.io/trace"
)
// Prepare attestations for fork choice three times per slot.
var prepareForkChoiceAttsPeriod = slots.DivideSlotBy(3 /* times-per-slot */)
// This prepares fork choice attestations by running batchForkChoiceAtts
// every prepareForkChoiceAttsPeriod.
func (s *Service) prepareForkChoiceAtts() {
ticker := time.NewTicker(prepareForkChoiceAttsPeriod)
defer ticker.Stop()
intervals := features.Get().AggregateIntervals
slotDuration := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
// Adjust intervals for networks with a lower slot duration (Hive, e2e, etc)
for {
if intervals[len(intervals)-1] >= slotDuration {
for i, offset := range intervals {
intervals[i] = offset / 2
}
} else {
break
}
}
ticker := slots.NewSlotTickerWithIntervals(time.Unix(int64(s.genesisTime), 0), intervals[:])
for {
select {
case <-ticker.C:
case <-ticker.C():
t := time.Now()
if err := s.batchForkChoiceAtts(s.ctx); err != nil {
log.WithError(err).Error("Could not prepare attestations for fork choice")
}
if slots.TimeIntoSlot(s.genesisTime) < intervals[1] {
batchForkChoiceAttsT1.Observe(float64(time.Since(t).Milliseconds()))
} else if slots.TimeIntoSlot(s.genesisTime) < intervals[2] {
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return

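The adjustment loop above halves all offsets together until the last one fits inside a slot; a self-contained sketch (the starting offsets are assumed, not the feature-flag defaults):

```go
package main

import (
	"fmt"
	"time"
)

// halveUntilFit mirrors the adjustment loop above: keep halving the
// aggregation offsets until the last one fits inside the slot duration
// (useful for test networks with short slots).
func halveUntilFit(intervals []time.Duration, slotDuration time.Duration) []time.Duration {
	for intervals[len(intervals)-1] >= slotDuration {
		for i, offset := range intervals {
			intervals[i] = offset / 2
		}
	}
	return intervals
}

func main() {
	// Mainnet-style offsets against an assumed 6s slot.
	intervals := []time.Duration{0, 7 * time.Second, 9 * time.Second}
	fmt.Println(halveUntilFit(intervals, 6*time.Second)) // [0s 3.5s 4.5s]
}
```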

@@ -5,6 +5,7 @@ package attestations
import (
"context"
"errors"
"time"
lru "github.com/hashicorp/golang-lru"
@@ -26,8 +27,9 @@ type Service struct {
// Config options for the service.
type Config struct {
Pool Pool
pruneInterval time.Duration
Pool Pool
pruneInterval time.Duration
InitialSyncComplete chan struct{}
}
// NewService instantiates a new attestation pool service instance that will
@@ -51,10 +53,24 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
// Start an attestation pool service's main event loop.
func (s *Service) Start() {
if err := s.waitForSync(s.cfg.InitialSyncComplete); err != nil {
log.WithError(err).Error("failed to wait for initial sync")
return
}
go s.prepareForkChoiceAtts()
go s.pruneAttsPool()
}
// waitForSync waits until the beacon node is synced to the latest head.
func (s *Service) waitForSync(syncChan chan struct{}) error {
select {
case <-syncChan:
return nil
case <-s.ctx.Done():
return errors.New("context closed, exiting goroutine")
}
}
// Stop the beacon block attestation pool service's main event loop
// and associated goroutines.
func (s *Service) Stop() error {

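The gating added to Start is a standard block-until-signal select; a runnable distillation, with names local to this sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// waitForSync blocks until the initial-sync channel closes or the context is
// cancelled — the same select used by the attestation pool service above.
func waitForSync(ctx context.Context, syncChan <-chan struct{}) error {
	select {
	case <-syncChan:
		return nil
	case <-ctx.Done():
		return errors.New("context closed, exiting goroutine")
	}
}

func main() {
	done := make(chan struct{})
	close(done) // simulate initial sync completing
	fmt.Println(waitForSync(context.Background(), done)) // <nil>
}
```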

@@ -30,8 +30,22 @@ func (s *Store) SaveSyncCommitteeMessage(msg *ethpb.SyncCommitteeMessage) error
return errors.New("not typed []ethpb.SyncCommitteeMessage")
}
messages = append(messages, copied)
savedSyncCommitteeMessageTotal.Inc()
idx := -1
for i, msg := range messages {
if msg.ValidatorIndex == copied.ValidatorIndex {
idx = i
break
}
}
if idx >= 0 {
// Override the existing message with the new one
messages[idx] = copied
} else {
// Append the new message
messages = append(messages, copied)
savedSyncCommitteeMessageTotal.Inc()
}
return s.messageCache.Push(&queue.Item{
Key: syncCommitteeKey(msg.Slot),
Value: messages,

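The replace-or-append logic keeps at most one message per validator index in the per-slot queue item; a distilled sketch with a toy message type:

```go
package main

import "fmt"

type msg struct {
	ValidatorIndex uint64
	Payload        string
}

// upsertByValidator mirrors the logic above: one message per validator index,
// newest wins.
func upsertByValidator(messages []msg, m msg) []msg {
	for i := range messages {
		if messages[i].ValidatorIndex == m.ValidatorIndex {
			messages[i] = m // override the existing message
			return messages
		}
	}
	return append(messages, m) // first message from this validator
}

func main() {
	msgs := upsertByValidator(nil, msg{1, "a"})
	msgs = upsertByValidator(msgs, msg{1, "b"})
	fmt.Println(msgs) // [{1 b}] — the duplicate was replaced, not appended
}
```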

@@ -55,6 +55,7 @@ go_library(
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",


@@ -55,6 +55,9 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
// BroadcastAttestation broadcasts an attestation to the p2p network, the message is assumed to be
// broadcasted to the current fork.
func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error {
if att == nil {
return errors.New("attempted to broadcast nil attestation")
}
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastAttestation")
defer span.End()
forkDigest, err := s.currentForkDigest()
@@ -73,6 +76,9 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
// BroadcastSyncCommitteeMessage broadcasts a sync committee message to the p2p network, the message is assumed to be
// broadcasted to the current fork.
func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error {
if sMsg == nil {
return errors.New("attempted to broadcast nil sync committee message")
}
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastSyncCommitteeMessage")
defer span.End()
forkDigest, err := s.currentForkDigest()


@@ -196,8 +196,12 @@ func TestService_BroadcastAttestation(t *testing.T) {
}
}(t)
// Attempt to broadcast nil object should fail.
ctx := context.Background()
require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastAttestation(ctx, subnet, nil))
// Broadcast to peers and wait.
require.NoError(t, p.BroadcastAttestation(context.Background(), subnet, msg))
require.NoError(t, p.BroadcastAttestation(ctx, subnet, msg))
if util.WaitTimeout(&wg, 1*time.Second) {
t.Error("Failed to receive pubsub within 1s")
}
@@ -433,8 +437,12 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
}
}(t)
+// Broadcasting nil should fail.
+ctx := context.Background()
+require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastSyncCommitteeMessage(ctx, subnet, nil))
// Broadcast to peers and wait.
-require.NoError(t, p.BroadcastSyncCommitteeMessage(context.Background(), subnet, msg))
+require.NoError(t, p.BroadcastSyncCommitteeMessage(ctx, subnet, msg))
if util.WaitTimeout(&wg, 1*time.Second) {
t.Error("Failed to receive pubsub within 1s")
}

View File

@@ -460,6 +460,19 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
return addresses, nil
}
func peerIdsFromMultiAddrs(addrs []ma.Multiaddr) []peer.ID {
peers := []peer.ID{}
for _, a := range addrs {
info, err := peer.AddrInfoFromP2pAddr(a)
if err != nil {
log.WithError(err).Errorf("Could not derive peer info from multiaddress %s", a.String())
continue
}
peers = append(peers, info.ID)
}
return peers
}
func multiAddrFromString(address string) (ma.Multiaddr, error) {
return ma.NewMultiaddr(address)
}

View File
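The helper above only succeeds for multiaddresses that carry a trailing /p2p/<peer-id> component, which is why it logs and skips bad entries rather than aborting the whole batch. A usage sketch of the underlying libp2p call, generating a throwaway identity since no real static peer is available here:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Generate a throwaway identity so the example has a valid peer ID;
	// in the diff above the IDs come from the configured static peers.
	priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		panic(err)
	}
	pid, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	// Without the /p2p/<id> suffix, AddrInfoFromP2pAddr returns an error.
	addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/13000/p2p/%s", pid))
	if err != nil {
		panic(err)
	}
	info, err := peer.AddrInfoFromP2pAddr(addr)
	if err != nil {
		panic(err)
	}
	fmt.Println("derived peer ID:", info.ID)
}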

@@ -6,12 +6,14 @@ import (
"net"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/muxer/mplex"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v4/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)
@@ -60,8 +62,8 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(tcp.NewTCPTransport),
-libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
+libp2p.DefaultMuxers,
+libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
}
options = append(options, libp2p.Security(noise.ID, noise.New))
@@ -99,6 +101,9 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
}
// Disable Ping Service.
options = append(options, libp2p.Ping(false))
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}
return options
}

View File

@@ -123,7 +123,7 @@ func TestDefaultMultiplexers(t *testing.T) {
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
assert.NoError(t, err)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[1].ID)
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
}

View File
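The reason a one-line reorder changes behavior is that libp2p registers muxers in option order and prefers the earliest during negotiation; listing DefaultMuxers (yamux) before mplex makes yamux the favoured multiplexer while keeping mplex available for peers that only speak it. A minimal sketch mirroring the test above, assuming the same go-libp2p APIs the diff itself uses:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/muxer/mplex"
)

func main() {
	// Option order is significant: muxers listed first win negotiation.
	opts := []libp2p.Option{
		libp2p.DefaultMuxers,
		libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
	}
	var cfg libp2p.Config
	if err := cfg.Apply(append(opts, libp2p.FallbackDefaults)...); err != nil {
		panic(err)
	}
	for i, m := range cfg.Muxers {
		fmt.Printf("muxer[%d] = %s\n", i, m.ID) // yamux first, then mplex
	}
}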

@@ -14,6 +14,7 @@ go_library(
deps = [
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/rand:go_default_library",
@@ -44,6 +45,7 @@ go_test(
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",

View File

@@ -38,9 +38,10 @@ type StoreConfig struct {
// the mutex when accessing data.
type Store struct {
sync.RWMutex
-ctx context.Context
-config *StoreConfig
-peers map[peer.ID]*PeerData
+ctx context.Context
+config *StoreConfig
+peers map[peer.ID]*PeerData
+trustedPeers map[peer.ID]bool
}
// PeerData aggregates protocol and application level info about a single peer.
@@ -69,9 +70,10 @@ type PeerData struct {
// NewStore creates new peer data store.
func NewStore(ctx context.Context, config *StoreConfig) *Store {
return &Store{
-ctx: ctx,
-config: config,
-peers: make(map[peer.ID]*PeerData),
+ctx: ctx,
+config: config,
+peers: make(map[peer.ID]*PeerData),
+trustedPeers: make(map[peer.ID]bool),
}
}
@@ -105,12 +107,25 @@ func (s *Store) DeletePeerData(pid peer.ID) {
delete(s.peers, pid)
}
// SetTrustedPeers sets our desired trusted peer set.
func (s *Store) SetTrustedPeers(peers []peer.ID) {
for _, p := range peers {
s.trustedPeers[p] = true
}
}
// Peers returns map of peer data objects.
// Important: it is assumed that store mutex is locked when calling this method.
func (s *Store) Peers() map[peer.ID]*PeerData {
return s.peers
}
// IsTrustedPeer checks that the provided peer
// is in our trusted peer set.
func (s *Store) IsTrustedPeer(p peer.ID) bool {
return s.trustedPeers[p]
}
// Config exposes store configuration params.
func (s *Store) Config() *StoreConfig {
return s.config

View File

@@ -80,3 +80,20 @@ func TestStore_PeerDataGetOrCreate(t *testing.T) {
assert.Equal(t, uint64(0), peerData.ProcessedBlocks)
require.Equal(t, 1, len(store.Peers()))
}
func TestStore_TrustedPeers(t *testing.T) {
store := peerdata.NewStore(context.Background(), &peerdata.StoreConfig{
MaxPeers: 12,
})
pid1 := peer.ID("00001")
pid2 := peer.ID("00002")
pid3 := peer.ID("00003")
tPeers := []peer.ID{pid1, pid2, pid3}
store.SetTrustedPeers(tPeers)
assert.Equal(t, true, store.IsTrustedPeer(pid1))
assert.Equal(t, true, store.IsTrustedPeer(pid2))
assert.Equal(t, true, store.IsTrustedPeer(pid3))
}

View File

@@ -5,6 +5,7 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/sirupsen/logrus"
)
@@ -12,6 +13,11 @@ func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,

View File

@@ -15,6 +15,7 @@ go_library(
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/rand:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -39,6 +40,7 @@ go_test(
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/rand:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -9,6 +9,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
)
@@ -290,6 +291,9 @@ func (s *BlockProviderScorer) mapScoresAndPeers(
func (s *BlockProviderScorer) FormatScorePretty(pid peer.ID) string {
s.store.RLock()
defer s.store.RUnlock()
if !features.Get().EnablePeerScorer {
return "disabled"
}
score := s.score(pid)
return fmt.Sprintf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]",
(score/s.MaxScore())*100, score, s.processedBlocks(pid), s.config.ProcessedBlocksCap)

View File
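For reference, the score string produced when the scorer is enabled looks like the following; this sketch simply replays the format string from the diff above with made-up numbers:

package main

import "fmt"

func main() {
	// With the peer scorer enabled, FormatScorePretty renders a score as a
	// percentage of the maximum plus the raw counters; with the scorer
	// disabled it now returns the literal "disabled".
	score, maxScore := 0.75, 1.0
	processed, capBlocks := uint64(512), uint64(1024)
	fmt.Printf("[%0.1f%%, raw: %0.2f, blocks: %d/%d]\n",
		(score/maxScore)*100, score, processed, capBlocks)
	// Output: [75.0%, raw: 0.75, blocks: 512/1024]
}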

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/time"
@@ -459,6 +460,16 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
tt.check(scorer)
})
}
t.Run("peer scorer disabled", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
assert.Equal(t, "disabled", scorer.FormatScorePretty("peer1"))
})
}
func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/sirupsen/logrus"
)
@@ -14,6 +15,11 @@ func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,

View File

@@ -7,6 +7,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/config/features"
)
var _ Scorer = (*Service)(nil)
@@ -137,8 +138,10 @@ func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
if s.scorers.peerStatusScorer.isBadPeer(pid) {
return true
}
-if s.scorers.gossipScorer.isBadPeer(pid) {
-return true
+if features.Get().EnablePeerScorer {
+if s.scorers.gossipScorer.isBadPeer(pid) {
+return true
+}
}
return false
}

View File

@@ -36,6 +36,7 @@ import (
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
@@ -334,6 +335,10 @@ func (p *Status) IsBad(pid peer.ID) bool {
// isBad is the lock-free version of IsBad.
func (p *Status) isBad(pid peer.ID) bool {
// Do not disconnect from trusted peers.
if p.store.IsTrustedPeer(pid) {
return false
}
return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)
}
@@ -543,6 +548,11 @@ func (p *Status) Prune() {
p.store.Lock()
defer p.store.Unlock()
// Default to the old method if the flag isn't enabled.
if !features.Get().EnablePeerScorer {
p.deprecatedPrune()
return
}
// Exit early if there is nothing to prune.
if len(p.store.Peers()) <= p.store.Config().MaxPeers {
return
@@ -587,6 +597,52 @@ func (p *Status) Prune() {
p.tallyIPTracker()
}
// Deprecated: This is the old peer pruning method based on
// bad response counts.
func (p *Status) deprecatedPrune() {
// Exit early if there is nothing to prune.
if len(p.store.Peers()) <= p.store.Config().MaxPeers {
return
}
notBadPeer := func(peerData *peerdata.PeerData) bool {
return peerData.BadResponses < p.scorers.BadResponsesScorer().Params().Threshold
}
type peerResp struct {
pid peer.ID
badResp int
}
peersToPrune := make([]*peerResp, 0)
// Select disconnected peers with a smaller bad response count.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerDisconnected && notBadPeer(peerData) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
})
}
}
// Sort peers in ascending order so that those with the
// fewest bad responses are pruned first. This protects the
// node from malicious/lousy peers by keeping the record of
// their bad responses in memory.
sort.Slice(peersToPrune, func(i, j int) bool {
return peersToPrune[i].badResp < peersToPrune[j].badResp
})
limitDiff := len(p.store.Peers()) - p.store.Config().MaxPeers
if limitDiff > len(peersToPrune) {
limitDiff = len(peersToPrune)
}
peersToPrune = peersToPrune[:limitDiff]
// Delete peers from map.
for _, peerData := range peersToPrune {
p.store.DeletePeerData(peerData.pid)
}
p.tallyIPTracker()
}
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
// upon by the majority of peers. This method may not return the absolute highest finalized epoch,
// but the finalized epoch in which most peers can serve blocks (plurality voting).
@@ -694,6 +750,9 @@ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (
// bad response count. In the future scoring will be used
// to determine the most suitable peers to take out.
func (p *Status) PeersToPrune() []peer.ID {
if !features.Get().EnablePeerScorer {
return p.deprecatedPeersToPrune()
}
connLimit := p.ConnectedPeerLimit()
inBoundLimit := uint64(p.InboundLimit())
activePeers := p.Active()
@@ -714,7 +773,7 @@ func (p *Status) PeersToPrune() []peer.ID {
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
-peerData.Direction == network.DirInbound {
+peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
score: p.scorers.ScoreNoLock(pid),
@@ -757,6 +816,71 @@ func (p *Status) PeersToPrune() []peer.ID {
return ids
}
// Deprecated: Is used to represent the older method
// of pruning which utilized bad response counts.
func (p *Status) deprecatedPeersToPrune() []peer.ID {
connLimit := p.ConnectedPeerLimit()
inBoundLimit := p.InboundLimit()
activePeers := p.Active()
numInboundPeers := len(p.InboundConnected())
// Exit early if we are still below our max
// limit.
if uint64(len(activePeers)) <= connLimit {
return []peer.ID{}
}
p.store.Lock()
defer p.store.Unlock()
type peerResp struct {
pid peer.ID
badResp int
}
peersToPrune := make([]*peerResp, 0)
// Select connected and inbound peers to prune.
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected &&
peerData.Direction == network.DirInbound && !p.store.IsTrustedPeer(pid) {
peersToPrune = append(peersToPrune, &peerResp{
pid: pid,
badResp: peerData.BadResponses,
})
}
}
// Sort in descending order to favour pruning peers with a
// higher bad response count.
sort.Slice(peersToPrune, func(i, j int) bool {
return peersToPrune[i].badResp > peersToPrune[j].badResp
})
// Determine amount of peers to prune using our
// max connection limit.
amountToPrune, err := pmath.Sub64(uint64(len(activePeers)), connLimit)
if err != nil {
// This should never happen
log.WithError(err).Error("Failed to determine amount of peers to prune")
return []peer.ID{}
}
// Also check for inbound peers above our limit.
excessInbound := uint64(0)
if numInboundPeers > inBoundLimit {
excessInbound = uint64(numInboundPeers - inBoundLimit)
}
// Prune the largest amount between excess peers and
// excess inbound peers.
if excessInbound > amountToPrune {
amountToPrune = excessInbound
}
if amountToPrune < uint64(len(peersToPrune)) {
peersToPrune = peersToPrune[:amountToPrune]
}
ids := make([]peer.ID, 0, len(peersToPrune))
for _, pr := range peersToPrune {
ids = append(ids, pr.pid)
}
return ids
}
// HighestEpoch returns the highest reported epoch amongst peers.
func (p *Status) HighestEpoch() primitives.Epoch {
p.store.RLock()
@@ -780,6 +904,14 @@ func (p *Status) ConnectedPeerLimit() uint64 {
return uint64(maxLim) - maxLimitBuffer
}
// SetTrustedPeers sets our trusted peer set into
// our peerstore.
func (p *Status) SetTrustedPeers(peers []peer.ID) {
p.store.Lock()
defer p.store.Unlock()
p.store.SetTrustedPeers(peers)
}
// this method assumes the store lock is acquired before
// executing the method.
func (p *Status) isfromBadIP(pid peer.ID) bool {

View File
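Taken together, these changes give trusted peers two protections: isBad short-circuits to false for them, and the prune selectors skip them even when their scores are poor. A simplified sketch of that selection filter, with plain types standing in for the real peerdata structures:

package main

import (
	"fmt"
	"sort"
)

type peerInfo struct {
	id      string
	inbound bool
	score   float64
}

// peersToPrune picks inbound, non-trusted peers and prunes the
// lowest-scoring first, the same shape as the updated PeersToPrune.
func peersToPrune(peers []peerInfo, trusted map[string]bool, excess int) []string {
	candidates := []peerInfo{}
	for _, p := range peers {
		if p.inbound && !trusted[p.id] {
			candidates = append(candidates, p)
		}
	}
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].score < candidates[j].score
	})
	if excess > len(candidates) {
		excess = len(candidates)
	}
	ids := make([]string, 0, excess)
	for _, c := range candidates[:excess] {
		ids = append(ids, c.id)
	}
	return ids
}

func main() {
	peers := []peerInfo{
		{"a", true, 0.9}, {"b", true, 0.1}, {"c", true, 0.5}, {"d", false, 0.0},
	}
	trusted := map[string]bool{"b": true} // protected despite the worst score
	fmt.Println(peersToPrune(peers, trusted, 2)) // [c a]
}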

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
@@ -548,6 +549,10 @@ func TestPrune(t *testing.T) {
}
func TestPeerIPTracker(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
@@ -582,7 +587,7 @@ func TestPeerIPTracker(t *testing.T) {
p.Prune()
for _, pr := range badPeers {
-assert.Equal(t, true, p.IsBad(pr), "peer with good ip is regarded as bad")
+assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
@@ -686,6 +691,10 @@ func TestAtInboundPeerLimit(t *testing.T) {
}
func TestPrunePeers(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
@@ -735,6 +744,80 @@ func TestPrunePeers(t *testing.T) {
assert.Equal(t, network.DirInbound, dir)
}
// Ensure it is in the descending order.
currCount, err := p.Scorers().BadResponsesScorer().Count(peersToPrune[0])
require.NoError(t, err)
for _, pid := range peersToPrune {
count, err := p.Scorers().BadResponsesScorer().Count(pid)
require.NoError(t, err)
assert.Equal(t, true, currCount >= count)
currCount = count
}
}
func TestPrunePeers_TrustedPeers(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 1,
},
},
})
for i := 0; i < 15; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirOutbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert there are no prunable peers.
peersToPrune := p.PeersToPrune()
assert.Equal(t, 0, len(peersToPrune))
for i := 0; i < 18; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
// Assert that the expected number of peers is prunable.
peersToPrune = p.PeersToPrune()
assert.Equal(t, 3, len(peersToPrune))
// Add in more peers.
for i := 0; i < 13; i++ {
// Peer added to peer handler.
createPeer(t, p, nil, network.DirInbound, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
trustedPeers := []peer.ID{}
// Set up bad scores for inbound peers.
inboundPeers := p.InboundConnected()
for i, pid := range inboundPeers {
modulo := i % 5
// Increment bad scores for peers.
for j := 0; j < modulo; j++ {
p.Scorers().BadResponsesScorer().Increment(pid)
}
if modulo == 4 {
trustedPeers = append(trustedPeers, pid)
}
}
p.SetTrustedPeers(trustedPeers)
// Assert all peers more than max are prunable.
peersToPrune = p.PeersToPrune()
assert.Equal(t, 16, len(peersToPrune))
// Check that trusted peers are not pruned.
for _, pid := range peersToPrune {
for _, tPid := range trustedPeers {
assert.NotEqual(t, pid.String(), tPid.String())
}
}
for _, pid := range peersToPrune {
dir, err := p.Direction(pid)
require.NoError(t, err)
assert.Equal(t, network.DirInbound, dir)
}
// Ensure it is in the descending order.
currScore := p.Scorers().Score(peersToPrune[0])
for _, pid := range peersToPrune {

View File

@@ -202,7 +202,7 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
if string(c) == "%" {
next := string(topic[i+1])
if next != "d" && next != "x" {
t.Errorf("Topic %s has formatting incompatiable with scanfcheck. Only %%d and %%x are supported", topic)
t.Errorf("Topic %s has formatting incompatible with scanfcheck. Only %%d and %%x are supported", topic)
}
}
}

View File

@@ -210,6 +210,10 @@ func (s *Service) Start() {
if err != nil {
log.WithError(err).Error("Could not connect to static peer")
}
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
s.peers.SetTrustedPeers(pids)
peersToWatch = append(peersToWatch, s.cfg.StaticPeers...)
s.connectWithAllPeers(addrs)
}
// Initialize metadata according to the

View File

@@ -49,6 +49,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
topic += s.Encoding().ProtocolSuffix()
iterator := s.dv5Listener.RandomNodes()
defer iterator.Close()
switch {
case strings.Contains(topic, GossipAttestationMessage):
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
@@ -61,13 +62,13 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
currNum := len(s.pubsub.ListPeers(topic))
wg := new(sync.WaitGroup)
for {
-if currNum >= threshold {
-break
-}
if err := ctx.Err(); err != nil {
return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
"only %d out of %d peers were able to be found", topic, currNum, threshold)
}
+if currNum >= threshold {
+break
+}
nodes := enode.ReadNodes(iterator, int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch))
for _, node := range nodes {
info, _, err := convertToAddrInfo(node)

View File
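The loop reorder above makes context expiry win over the peer-count check at the top of each search round, so a cancelled or timed-out search reliably surfaces as an error reporting the shortfall. A generic sketch of this poll-until-threshold pattern; the names are illustrative:

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntil searches in rounds until enough peers are found or ctx expires.
// Checking ctx.Err() first mirrors the reordered loop in FindPeersWithSubnet.
func pollUntil(ctx context.Context, have func() int, threshold int, searchRound func()) error {
	for {
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("found %d of %d peers: %w", have(), threshold, err)
		}
		if have() >= threshold {
			return nil
		}
		searchRound()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	count := 0
	err := pollUntil(ctx, func() int { return count }, 3, func() {
		time.Sleep(10 * time.Millisecond)
		count++ // pretend each round discovers one peer
	})
	fmt.Println(count, err)
}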

@@ -6,10 +6,12 @@ go_library(
"blinded_blocks.go",
"blocks.go",
"config.go",
"handlers.go",
"log.go",
"pool.go",
"server.go",
"state.go",
"structs.go",
"sync_committee.go",
"validator.go",
],
@@ -41,7 +43,6 @@ go_library(
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -50,7 +51,9 @@ go_library(
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
@@ -58,8 +61,10 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_go_playground_validator_v10//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
@@ -76,6 +81,7 @@ go_test(
"blinded_blocks_test.go",
"blocks_test.go",
"config_test.go",
"handlers_test.go",
"init_test.go",
"pool_test.go",
"server_test.go",
@@ -87,7 +93,6 @@ go_test(
deps = [
"//api/grpc:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/builder/testing:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -106,6 +111,7 @@ go_test(
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -116,19 +122,21 @@ go_test(
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/mock:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",

View File

@@ -2,11 +2,15 @@ package beacon
import (
"context"
"strings"
"github.com/pkg/errors"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
@@ -40,8 +44,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = getBlindedBlockAltair(blk)
@@ -49,8 +53,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = bs.getBlindedBlockBellatrix(ctx, blk)
@@ -58,8 +62,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = bs.getBlindedBlockCapella(ctx, blk)
@@ -67,8 +71,8 @@ func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
@@ -95,8 +99,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = getSSZBlockAltair(blk)
@@ -104,8 +108,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlindedSSZBlockBellatrix(ctx, blk)
@@ -113,8 +117,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlindedSSZBlockCapella(ctx, blk)
@@ -122,8 +126,8 @@ func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequ
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
@@ -142,6 +146,11 @@ func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBli
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
defer span.End()
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
switch blkContainer := req.Message.(type) {
case *ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock:
if err := bs.submitBlindedCapellaBlock(ctx, blkContainer.CapellaBlock, req.Signature); err != nil {
@@ -180,6 +189,11 @@ func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZCon
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read"+versionHeader+" header")
@@ -202,31 +216,84 @@ func (bs *Server) SubmitBlindedBlockSSZ(ctx context.Context, req *ethpbv2.SSZCon
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
if block.IsBlinded() {
switch forkVer {
case bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion):
if !block.IsBlinded() {
return nil, status.Error(codes.InvalidArgument, "Submitted block is not blinded")
}
b, err := block.PbBlindedCapellaBlock()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_BlindedCapella{
BlindedCapella: b,
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, nil
case bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion):
if !block.IsBlinded() {
return nil, status.Error(codes.InvalidArgument, "Submitted block is not blinded")
}
b, err := block.PbBlindedBellatrixBlock()
if err != nil {
b, err := block.PbBlindedCapellaBlock()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
bb, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(b.Block)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not migrate block: %v", err)
}
return &emptypb.Empty{}, bs.submitBlindedCapellaBlock(ctx, bb, b.Signature)
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
bb, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.Block)
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{
BlindedBellatrix: b,
},
})
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not migrate block: %v", err)
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, bs.submitBlindedBellatrixBlock(ctx, bb, b.Signature)
return &emptypb.Empty{}, nil
case bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion):
b, err := block.PbAltairBlock()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Altair{
Altair: b,
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, nil
case bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion):
b, err := block.PbPhase0Block()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Phase0{
Phase0: b,
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, nil
default:
return &emptypb.Empty{}, status.Errorf(codes.InvalidArgument, "Unsupported fork %s", string(forkVer[:]))
}
root, err := block.Block().HashTreeRoot()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
}
return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}
func getBlindedBlockPhase0(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
@@ -277,8 +344,8 @@ func getBlindedBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.B
func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -347,8 +414,8 @@ func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.R
func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock
@@ -417,8 +484,8 @@ func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.Rea
func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -492,8 +559,8 @@ func (bs *Server) getBlindedSSZBlockBellatrix(ctx context.Context, blk interface
func (bs *Server) getBlindedSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock
@@ -570,7 +637,7 @@ func (bs *Server) submitBlindedBellatrixBlock(ctx context.Context, blindedBellat
Signature: sig,
})
if err != nil {
return status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
return status.Errorf(codes.Internal, "Could not convert block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_BlindedBellatrix{
@@ -578,6 +645,9 @@ func (bs *Server) submitBlindedBellatrixBlock(ctx context.Context, blindedBellat
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose blinded block: %v", err)
}
return nil
@@ -589,7 +659,7 @@ func (bs *Server) submitBlindedCapellaBlock(ctx context.Context, blindedCapellaB
Signature: sig,
})
if err != nil {
return status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
return status.Errorf(codes.Internal, "Could not convert block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_BlindedCapella{
@@ -597,6 +667,9 @@ func (bs *Server) submitBlindedCapellaBlock(ctx context.Context, blindedCapellaB
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose blinded block: %v", err)
}
return nil

View File
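The ErrUnsupportedGetter to ErrUnsupportedField rename does not change the control flow: each version-specific getter is tried in turn, the sentinel error means the block is some other version so the next getter should be tried, and any other error is fatal. A schematic of that fallthrough pattern, with a stand-in sentinel and getters:

package main

import (
	"errors"
	"fmt"
)

// errUnsupportedField stands in for consensus_types.ErrUnsupportedField: it
// signals "this block is some other version", not a real failure.
var errUnsupportedField = errors.New("unsupported field")

func getPhase0(v string) (string, error) {
	if v != "phase0" {
		return "", errUnsupportedField
	}
	return "phase0 response", nil
}

func getAltair(v string) (string, error) {
	if v != "altair" {
		return "", errUnsupportedField
	}
	return "altair response", nil
}

// resolve tries each version-specific getter in fork order, treating the
// sentinel as "try the next type" and anything else as fatal.
func resolve(v string) (string, error) {
	if resp, err := getPhase0(v); err == nil {
		return resp, nil
	} else if !errors.Is(err, errUnsupportedField) {
		return "", err
	}
	if resp, err := getAltair(v); err == nil {
		return resp, nil
	} else if !errors.Is(err, errUnsupportedField) {
		return "", err
	}
	return "", errors.New("unknown block version")
}

func main() {
	fmt.Println(resolve("altair")) // altair response <nil>
}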

@@ -4,22 +4,17 @@ import (
"context"
"testing"
"github.com/golang/mock/gomock"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
builderTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder/testing"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"google.golang.org/grpc/metadata"
@@ -307,383 +302,214 @@ func TestServer_GetBlindedBlockSSZ(t *testing.T) {
})
}
-func TestServer_SubmitBlindedBlockSSZ_OK(t *testing.T) {
+func TestServer_SubmitBlindedBlockSSZ(t *testing.T) {
ctrl := gomock.NewController(t)
ctx := context.Background()
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
b := util.NewBeaconBlock()
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
b := util.NewBeaconBlockAltair()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Bellatrix", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
V1Alpha1ValidatorServer: alphaServer,
}
req := util.NewBlindedBeaconBlockBellatrix()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Capella", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockCapella()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
t.Run("Bellatrix full", func(t *testing.T) {
server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
V1Alpha1ValidatorServer: alphaServer,
}
req := util.NewBlindedBeaconBlockCapella()
req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
req.Block.ParentRoot = bsRoot[:]
util.SaveBlock(t, ctx, beaconDB, req)
blockSsz, err := req.MarshalSSZ()
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
})
t.Run("Capella", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlindedBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Capella full", func(t *testing.T) {
server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
b := util.NewBeaconBlockCapella()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlindedBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
})
t.Run("sync not ready", func(t *testing.T) {
chainService := &mock.ChainService{}
v1Server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := v1Server.SubmitBlindedBlockSSZ(context.Background(), nil)
require.ErrorContains(t, "Syncing to latest head", err)
})
}
func TestSubmitBlindedBlock(t *testing.T) {
ctrl := gomock.NewController(t)
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v1Block, err := migration.V1Alpha1ToV1SignedBlock(req)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Block.Block},
Signature: v1Block.Signature,
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: &ethpbv1.BeaconBlock{}},
Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
+assert.NoError(t, err)
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(req.Block)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Block},
Signature: req.Signature,
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: &ethpbv2.BeaconBlockAltair{}},
Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
+assert.NoError(t, err)
})
t.Run("Bellatrix", func(t *testing.T) {
transactions := [][]byte{[]byte("transaction1"), []byte("transaction2")}
transactionsRoot, err := ssz.TransactionsRoot(transactions)
require.NoError(t, err)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
V1Alpha1ValidatorServer: alphaServer,
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 5
blk.Block.ParentRoot = bsRoot[:]
blk.Block.Body.ExecutionPayload.Transactions = transactions
blindedBlk := util.NewBlindedBeaconBlockBellatrixV2()
blindedBlk.Message.Slot = 5
blindedBlk.Message.ParentRoot = bsRoot[:]
blindedBlk.Message.Body.ExecutionPayloadHeader.TransactionsRoot = transactionsRoot[:]
util.SaveBlock(t, ctx, beaconDB, blk)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blindedBlk.Message},
Signature: blindedBlk.Signature,
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: &ethpbv2.BlindedBeaconBlockBellatrix{}},
Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
+_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
})
t.Run("Capella", func(t *testing.T) {
transactions := [][]byte{[]byte("transaction1"), []byte("transaction2")}
transactionsRoot, err := ssz.TransactionsRoot(transactions)
require.NoError(t, err)
withdrawals := []*enginev1.Withdrawal{
{
Index: 1,
ValidatorIndex: 1,
Address: bytesutil.PadTo([]byte("address1"), 20),
Amount: 1,
},
{
Index: 2,
ValidatorIndex: 2,
Address: bytesutil.PadTo([]byte("address2"), 20),
Amount: 2,
},
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
withdrawalsRoot, err := ssz.WithdrawalSliceRoot(withdrawals, 16)
require.NoError(t, err)
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockCapella()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
alphaServer := &validator.Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
BlockBuilder: &builderTest.MockBuilderService{},
BlockReceiver: c,
BlockNotifier: &mock.MockBlockNotifier{},
}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
V1Alpha1ValidatorServer: alphaServer,
}
blk := util.NewBeaconBlockCapella()
blk.Block.Slot = 5
blk.Block.ParentRoot = bsRoot[:]
blk.Block.Body.ExecutionPayload.Transactions = transactions
blk.Block.Body.ExecutionPayload.Withdrawals = withdrawals
blindedBlk := util.NewBlindedBeaconBlockCapellaV2()
blindedBlk.Message.Slot = 5
blindedBlk.Message.ParentRoot = bsRoot[:]
blindedBlk.Message.Body.ExecutionPayloadHeader.TransactionsRoot = transactionsRoot[:]
blindedBlk.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
util.SaveBlock(t, ctx, beaconDB, blk)
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
-Message: &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: blindedBlk.Message},
-Signature: blindedBlk.Signature,
+Message: &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: &ethpbv2.BlindedBeaconBlockCapella{}},
+Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
+_, err := server.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
})
t.Run("sync not ready", func(t *testing.T) {
chainService := &mock.ChainService{}
v1Server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := v1Server.SubmitBlindedBlock(context.Background(), nil)
require.ErrorContains(t, "Syncing to latest head", err)
})
}
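The rewritten tests assert forwarding only: the gomock expectation EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any()) fails the test unless SubmitBlindedBlock reaches the v1alpha1 server exactly once. A tighter variant could pin the argument type with gomock's AssignableToTypeOf matcher; a sketch, assuming ethpbalpha aliases the v1alpha1 protos in this test file:

// Sketch: fail the test unless the handler forwards a
// *ethpbalpha.GenericSignedBeaconBlock, not merely any second argument.
v1alpha1Server.EXPECT().ProposeBeaconBlock(
	gomock.Any(), // context
	gomock.AssignableToTypeOf(&ethpbalpha.GenericSignedBeaconBlock{}),
)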


@@ -4,16 +4,15 @@ import (
"context"
"fmt"
"strconv"
"strings"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filters"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/v1alpha1/validator"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -25,6 +24,7 @@ import (
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
@@ -43,7 +43,7 @@ var (
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
// DEPRECATED: GetWeakSubjectivity endpoint will no longer be supported
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
-if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
+if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
// This is already a grpc error, so we can't wrap it any further
return nil, err
}
@@ -206,6 +206,11 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
defer span.End()
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
switch blkContainer := req.Message.(type) {
case *ethpbv2.SignedBeaconBlockContainer_Phase0Block:
if err := bs.submitPhase0Block(ctx, blkContainer.Phase0Block, req.Signature); err != nil {
@@ -241,6 +246,11 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlockSSZ")
defer span.End()
if err := rpchelpers.ValidateSyncGRPC(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not read "+versionHeader+" header")
@@ -262,11 +272,85 @@ func (bs *Server) SubmitBlockSSZ(ctx context.Context, req *ethpbv2.SSZContainer)
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not unmarshal request data into block: %v", err)
}
-root, err := block.Block().HashTreeRoot()
-if err != nil {
-return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not compute block's hash tree root: %v", err)
-}
switch forkVer {
case bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion):
if block.IsBlinded() {
return nil, status.Error(codes.InvalidArgument, "Submitted block is blinded")
}
b, err := block.PbCapellaBlock()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Capella{
Capella: b,
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, nil
case bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion):
if block.IsBlinded() {
return nil, status.Error(codes.InvalidArgument, "Submitted block is blinded")
}
b, err := block.PbBellatrixBlock()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Bellatrix{
Bellatrix: b,
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, nil
case bytesutil.ToBytes4(params.BeaconConfig().AltairForkVersion):
b, err := block.PbAltairBlock()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Altair{
Altair: b,
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, nil
case bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion):
b, err := block.PbPhase0Block()
if err != nil {
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not get proto block: %v", err)
}
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Phase0{
Phase0: b,
},
})
if err != nil {
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return &emptypb.Empty{}, status.Error(codes.InvalidArgument, err.Error())
}
return &emptypb.Empty{}, status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
return &emptypb.Empty{}, nil
default:
return &emptypb.Empty{}, status.Errorf(codes.InvalidArgument, "Unsupported fork %s", string(forkVer[:]))
}
-return &emptypb.Empty{}, bs.submitBlock(ctx, root, block)
}
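Every case in the new switch has the same shape: reject blinded payloads where applicable, unwrap the fork-specific proto block, and hand ProposeBeaconBlock the matching oneof arm of eth.GenericSignedBeaconBlock. Condensed into a hypothetical helper (wrapForPropose is illustrative, not part of this change; only two forks shown):

// wrapForPropose condenses the fork-version dispatch above: pick the proto getter
// that matches the fork version and wrap the result in the generic oneof.
func wrapForPropose(forkVer [4]byte, block interfaces.ReadOnlySignedBeaconBlock) (*eth.GenericSignedBeaconBlock, error) {
	switch forkVer {
	case bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion):
		b, err := block.PbCapellaBlock()
		if err != nil {
			return nil, err
		}
		return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_Capella{Capella: b}}, nil
	case bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion):
		b, err := block.PbBellatrixBlock()
		if err != nil {
			return nil, err
		}
		return &eth.GenericSignedBeaconBlock{Block: &eth.GenericSignedBeaconBlock_Bellatrix{Bellatrix: b}}, nil
	default:
		return nil, fmt.Errorf("unsupported fork %#x", forkVer)
	}
}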
// GetBlock retrieves block details for given block ID.
@@ -336,8 +420,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = getBlockAltair(blk)
@@ -345,8 +429,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlockBellatrix(ctx, blk)
@@ -354,8 +438,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getBlockCapella(ctx, blk)
@@ -363,8 +447,8 @@ func (bs *Server) GetBlockV2(ctx context.Context, req *ethpbv2.BlockRequestV2) (
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
@@ -390,8 +474,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = getSSZBlockAltair(blk)
@@ -399,8 +483,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getSSZBlockBellatrix(ctx, blk)
@@ -408,8 +492,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
result, err = bs.getSSZBlockCapella(ctx, blk)
@@ -417,8 +501,8 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
result.Finalized = bs.FinalizationFetcher.IsFinalized(ctx, blkRoot)
return result, nil
}
-// ErrUnsupportedGetter means that we have another block type
-if !errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if !errors.Is(err, consensus_types.ErrUnsupportedField) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
@@ -605,8 +689,8 @@ func getBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockRes
func (bs *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -675,8 +759,8 @@ func (bs *Server) getBlockBellatrix(ctx context.Context, blk interfaces.ReadOnly
func (bs *Server) getBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.BlockResponseV2, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock
@@ -788,8 +872,8 @@ func getSSZBlockAltair(blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZCo
func (bs *Server) getSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
@@ -864,8 +948,8 @@ func (bs *Server) getSSZBlockBellatrix(ctx context.Context, blk interfaces.ReadO
func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.SSZContainer, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
-// ErrUnsupportedGetter means that we have another block type
-if errors.Is(err, consensus_types.ErrUnsupportedGetter) {
+// ErrUnsupportedField means that we have another block type
+if errors.Is(err, consensus_types.ErrUnsupportedField) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock
@@ -940,36 +1024,39 @@ func (bs *Server) getSSZBlockCapella(ctx context.Context, blk interfaces.ReadOnl
func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(&ethpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
return status.Errorf(codes.InvalidArgument, "Could not convert block: %v", err)
}
-wrappedPhase0Blk, err := blocks.NewSignedBeaconBlock(v1alpha1Blk)
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Phase0{
Phase0: v1alpha1Blk,
},
})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not prepare block")
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
-root, err := phase0Blk.HashTreeRoot()
-if err != nil {
-return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
-}
-return bs.submitBlock(ctx, root, wrappedPhase0Blk)
return nil
}
func (bs *Server) submitAltairBlock(ctx context.Context, altairBlk *ethpbv2.BeaconBlockAltair, sig []byte) error {
v1alpha1Blk, err := migration.AltairToV1Alpha1SignedBlock(&ethpbv2.SignedBeaconBlockAltair{Message: altairBlk, Signature: sig})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
return status.Errorf(codes.InvalidArgument, "Could not convert block %v", err)
}
-wrappedAltairBlk, err := blocks.NewSignedBeaconBlock(v1alpha1Blk)
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Altair{
Altair: v1alpha1Blk,
},
})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not prepare block")
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
-root, err := altairBlk.HashTreeRoot()
-if err != nil {
-return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
-}
-return bs.submitBlock(ctx, root, wrappedAltairBlk)
return nil
}
func (bs *Server) submitBellatrixBlock(ctx context.Context, bellatrixBlk *ethpbv2.BeaconBlockBellatrix, sig []byte) error {
@@ -977,17 +1064,18 @@ func (bs *Server) submitBellatrixBlock(ctx context.Context, bellatrixBlk *ethpbv
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
}
-wrappedBellatrixBlk, err := blocks.NewSignedBeaconBlock(v1alpha1Blk)
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Bellatrix{
Bellatrix: v1alpha1Blk,
},
})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not prepare block")
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
-root, err := bellatrixBlk.HashTreeRoot()
-if err != nil {
-return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
-}
-return bs.submitBlock(ctx, root, wrappedBellatrixBlk)
return nil
}
func (bs *Server) submitCapellaBlock(ctx context.Context, capellaBlk *ethpbv2.BeaconBlockCapella, sig []byte) error {
@@ -995,42 +1083,16 @@ func (bs *Server) submitCapellaBlock(ctx context.Context, capellaBlk *ethpbv2.Be
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
}
-wrappedCapellaBlk, err := blocks.NewSignedBeaconBlock(v1alpha1Blk)
_, err = bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, &eth.GenericSignedBeaconBlock{
Block: &eth.GenericSignedBeaconBlock_Capella{
Capella: v1alpha1Blk,
},
})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not prepare block")
if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
return status.Error(codes.InvalidArgument, err.Error())
}
return status.Errorf(codes.Internal, "Could not propose block: %v", err)
}
-root, err := capellaBlk.HashTreeRoot()
-if err != nil {
-return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
-}
-return bs.submitBlock(ctx, root, wrappedCapellaBlk)
return nil
}
-func (bs *Server) submitBlock(ctx context.Context, blockRoot [fieldparams.RootLength]byte, block interfaces.ReadOnlySignedBeaconBlock) error {
-// Do not block proposal critical path with debug logging or block feed updates.
-defer func() {
-log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(blockRoot[:]))).Debugf(
-"Block proposal received via RPC")
-bs.BlockNotifier.BlockFeed().Send(&feed.Event{
-Type: blockfeed.ReceivedBlock,
-Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
-})
-}()
-// Broadcast the new block to the network.
-blockPb, err := block.Proto()
-if err != nil {
-return errors.Wrap(err, "could not get protobuf block")
-}
-if err := bs.Broadcaster.Broadcast(ctx, blockPb); err != nil {
-return status.Errorf(codes.Internal, "Could not broadcast block: %v", err)
-}
-if err := bs.BlockReceiver.ReceiveBlock(ctx, block, blockRoot); err != nil {
-return status.Errorf(codes.Internal, "Could not process beacon block: %v", err)
-}
-return nil
-}
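All four submit* helpers now end with the same error classification after delegating to ProposeBeaconBlock: decode failures surface as InvalidArgument, anything else as Internal. Factored out, the shared pattern would look roughly like this (proposeStatusErr is a hypothetical name; validator.CouldNotDecodeBlock comes from the change above):

// proposeStatusErr captures the error mapping that submitPhase0Block, submitAltairBlock,
// submitBellatrixBlock and submitCapellaBlock each inline after ProposeBeaconBlock.
func proposeStatusErr(err error) error {
	if err == nil {
		return nil
	}
	// Decode failures reported by the v1alpha1 server become client errors.
	if strings.Contains(err.Error(), validator.CouldNotDecodeBlock) {
		return status.Error(codes.InvalidArgument, err.Error())
	}
	// Everything else is an internal proposal failure.
	return status.Errorf(codes.Internal, "Could not propose block: %v", err)
}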


@@ -4,12 +4,13 @@ import (
"context"
"testing"
"github.com/golang/mock/gomock"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -20,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/proto/migration"
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"google.golang.org/grpc/metadata"
@@ -347,319 +349,215 @@ func TestServer_ListBlockHeaders(t *testing.T) {
})
}
-func TestServer_SubmitBlock_OK(t *testing.T) {
+func TestServer_SubmitBlock(t *testing.T) {
ctrl := gomock.NewController(t)
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v1Block, err := migration.V1Alpha1ToV1SignedBlock(req)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBeaconBlockContainer{
-Message: &ethpbv2.SignedBeaconBlockContainer_Phase0Block{Phase0Block: v1Block.Block},
-Signature: v1Block.Signature,
+Message: &ethpbv2.SignedBeaconBlockContainer_Phase0Block{Phase0Block: &ethpbv1.BeaconBlock{}},
+Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlock(context.Background(), blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err := server.SubmitBlock(context.Background(), blockReq)
+assert.NoError(t, err)
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(req.Block)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBeaconBlockContainer{
-Message: &ethpbv2.SignedBeaconBlockContainer_AltairBlock{AltairBlock: v2Block},
-Signature: req.Signature,
+Message: &ethpbv2.SignedBeaconBlockContainer_AltairBlock{AltairBlock: &ethpbv2.BeaconBlockAltair{}},
+Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlock(context.Background(), blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err := server.SubmitBlock(context.Background(), blockReq)
+assert.NoError(t, err)
})
t.Run("Bellatrix", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlockBellatrix()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v2Block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(req.Block)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBeaconBlockContainer{
-Message: &ethpbv2.SignedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Block},
-Signature: req.Signature,
+Message: &ethpbv2.SignedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: &ethpbv2.BeaconBlockBellatrix{}},
+Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlock(context.Background(), blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err := server.SubmitBlock(context.Background(), blockReq)
+assert.NoError(t, err)
})
t.Run("Capella", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockCapella()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
req := util.NewBeaconBlockCapella()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v2Block, err := migration.V1Alpha1BeaconBlockCapellaToV2(req.Block)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, req)
blockReq := &ethpbv2.SignedBeaconBlockContainer{
-Message: &ethpbv2.SignedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Block},
-Signature: req.Signature,
+Message: &ethpbv2.SignedBeaconBlockContainer_CapellaBlock{CapellaBlock: &ethpbv2.BeaconBlockCapella{}},
+Signature: []byte("sig"),
}
-_, err = beaconChainServer.SubmitBlock(context.Background(), blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err := server.SubmitBlock(context.Background(), blockReq)
+assert.NoError(t, err)
})
t.Run("sync not ready", func(t *testing.T) {
chainService := &mock.ChainService{}
v1Server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := v1Server.SubmitBlock(context.Background(), nil)
require.ErrorContains(t, "Syncing to latest head", err)
})
}
-func TestServer_SubmitBlockSSZ_OK(t *testing.T) {
+func TestServer_SubmitBlockSSZ(t *testing.T) {
ctrl := gomock.NewController(t)
ctx := context.Background()
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
-req := util.NewBeaconBlock()
-req.Block.Slot = 5
-req.Block.ParentRoot = bsRoot[:]
-util.SaveBlock(t, ctx, beaconDB, req)
-blockSsz, err := req.MarshalSSZ()
+b := util.NewBeaconBlock()
+ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "phase0")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockAltair()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
-req := util.NewBeaconBlockAltair()
-req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
-req.Block.ParentRoot = bsRoot[:]
-util.SaveBlock(t, ctx, beaconDB, req)
-blockSsz, err := req.MarshalSSZ()
+b := util.NewBeaconBlockAltair()
+b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().AltairForkEpoch))
+ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "altair")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Bellatrix", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockBellatrix()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
}
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
-req := util.NewBeaconBlockBellatrix()
-req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
-req.Block.ParentRoot = bsRoot[:]
-util.SaveBlock(t, ctx, beaconDB, req)
-blockSsz, err := req.MarshalSSZ()
+b := util.NewBeaconBlockBellatrix()
+b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
+ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Capella", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlockCapella()
util.SaveBlock(t, context.Background(), beaconDB, genesis)
numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
HeadFetcher: c,
t.Run("Bellatrix blinded", func(t *testing.T) {
server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
-req := util.NewBeaconBlockCapella()
-req.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
-req.Block.ParentRoot = bsRoot[:]
-util.SaveBlock(t, ctx, beaconDB, req)
-blockSsz, err := req.MarshalSSZ()
+b := util.NewBlindedBeaconBlockBellatrix()
+b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().BellatrixForkEpoch))
+ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
-Data: blockSsz,
+Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "bellatrix")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
})
t.Run("Capella", func(t *testing.T) {
v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), gomock.Any())
server := &Server{
V1Alpha1ValidatorServer: v1alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
b := util.NewBeaconBlockCapella()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
-_, err = beaconChainServer.SubmitBlockSSZ(sszCtx, blockReq)
-assert.NoError(t, err, "Could not propose block correctly")
+_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
+assert.NoError(t, err)
})
t.Run("Capella blinded", func(t *testing.T) {
server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().CapellaForkEpoch))
ssz, err := b.MarshalSSZ()
require.NoError(t, err)
blockReq := &ethpbv2.SSZContainer{
Data: ssz,
}
md := metadata.MD{}
md.Set(versionHeader, "capella")
sszCtx := metadata.NewIncomingContext(ctx, md)
_, err = server.SubmitBlockSSZ(sszCtx, blockReq)
assert.NotNil(t, err)
})
t.Run("sync not ready", func(t *testing.T) {
chainService := &mock.ChainService{}
v1Server := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
}
_, err := v1Server.SubmitBlockSSZ(context.Background(), nil)
require.ErrorContains(t, "Syncing to latest head", err)
})
}
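The tests inject the version header with metadata.NewIncomingContext because they invoke the server method directly; a real gRPC client would attach the same header to its outgoing context. A sketch, assuming the versionHeader constant resolves to "eth-consensus-version" and beaconClient is a connected stub:

// Client-side: attach the consensus version header before submitting SSZ bytes.
ctx := metadata.AppendToOutgoingContext(context.Background(), "eth-consensus-version", "capella")
_, err := beaconClient.SubmitBlockSSZ(ctx, &ethpbv2.SSZContainer{Data: ssz})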


@@ -136,7 +136,7 @@ func TestGetSpec(t *testing.T) {
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
-assert.Equal(t, 105, len(resp.Data))
+assert.Equal(t, 108, len(resp.Data))
for k, v := range resp.Data {
switch k {
case "CONFIG_NAME":
@@ -331,6 +331,10 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "0x08000000", v)
case "DOMAIN_CONTRIBUTION_AND_PROOF":
assert.Equal(t, "0x09000000", v)
case "DOMAIN_BLS_TO_EXECUTION_CHANGE":
assert.Equal(t, "0x0a000000", v)
case "DOMAIN_APPLICATION_BUILDER":
assert.Equal(t, "0x00000001", v)
case "TRANSITION_TOTAL_DIFFICULTY":
assert.Equal(t, "0", v)
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
@@ -361,6 +365,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "2", v)
case "REORG_WEIGHT_THRESHOLD":
assert.Equal(t, "20", v)
case "REORG_PARENT_WEIGHT_THRESHOLD":
assert.Equal(t, "160", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
default:
t.Errorf("Incorrect key: %s", k)


@@ -0,0 +1,375 @@
package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/go-playground/validator/v10"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/network"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
const (
broadcastValidationQueryParam = "broadcast_validation"
broadcastValidationConsensus = "consensus"
broadcastValidationConsensusAndEquivocation = "consensus_and_equivocation"
)
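broadcast_validation is read straight off the query string, so callers opt into stricter checking per request. An illustrative client call; the endpoint path follows the beacon API spec, and the host, port and signedBlockJSON payload are assumptions:

// Sketch: request consensus plus equivocation checks before the node broadcasts.
url := "http://localhost:3500/eth/v2/beacon/blocks?broadcast_validation=consensus_and_equivocation"
resp, err := http.Post(url, "application/json", bytes.NewReader(signedBlockJSON))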
// PublishBlindedBlockV2 instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct and publish a
// `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
// to be included in the beacon chain. The beacon node is not required to validate the signed
// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
// successful. The beacon node is expected to integrate the new block into its state, and
therefore validate the block internally; however, blocks which fail validation are still
// broadcast but a different status code is returned (202). Pre-Bellatrix, this endpoint will accept
// a `SignedBeaconBlock`. The broadcast behaviour may be adjusted via the `broadcast_validation`
// query parameter.
func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
return
}
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
var capellaBlock *SignedBlindedBeaconBlockCapella
if err = unmarshalStrict(body, &capellaBlock); err == nil {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var bellatrixBlock *SignedBlindedBeaconBlockBellatrix
if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var altairBlock *SignedBeaconBlockAltair
if err = unmarshalStrict(body, &altairBlock); err == nil {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var phase0Block *SignedBeaconBlock
if err = unmarshalStrict(body, &phase0Block); err == nil {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
errJson := &network.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
}
// PublishBlockV2 instructs the beacon node to broadcast a newly signed beacon block to the beacon network,
// to be included in the beacon chain. A success response (20x) indicates that the block
// passed gossip validation and was successfully broadcast onto the network.
// The beacon node is also expected to integrate the block into the state, but may broadcast it
// before doing so, so as to aid timely delivery of the block. Should the block fail full
// validation, a separate success response code (202) is used to indicate that the block was
// successfully broadcast but failed integration. The broadcast behaviour may be adjusted via the
// `broadcast_validation` query parameter.
func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
return
}
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
var capellaBlock *SignedBeaconBlockCapella
if err = unmarshalStrict(body, &capellaBlock); err == nil {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var bellatrixBlock *SignedBeaconBlockBellatrix
if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var altairBlock *SignedBeaconBlockAltair
if err = unmarshalStrict(body, &altairBlock); err == nil {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
var phase0Block *SignedBeaconBlock
if err = unmarshalStrict(body, &phase0Block); err == nil {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
return
}
}
errJson := &network.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
}
func (bs *Server) proposeBlock(ctx context.Context, w http.ResponseWriter, blk *eth.GenericSignedBeaconBlock) {
_, err := bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, blk)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return
}
}
func unmarshalStrict(data []byte, v interface{}) error {
dec := json.NewDecoder(bytes.NewReader(data))
dec.DisallowUnknownFields()
return dec.Decode(v)
}
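Strict decoding is what lets the publish handlers probe candidate block shapes in order: a body carrying fields the candidate struct does not declare fails immediately instead of half-decoding into the wrong type. A standalone illustration with a hypothetical struct:

// A Capella-style body fails strict decoding into a Bellatrix-shaped struct
// because of fields the older shape does not declare.
type bellatrixShaped struct {
	TransactionsRoot string `json:"transactions_root"`
}
err := unmarshalStrict([]byte(`{"transactions_root":"0x..","withdrawals_root":"0x.."}`), &bellatrixShaped{})
// err: json: unknown field "withdrawals_root"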
func (bs *Server) validateBroadcast(r *http.Request, blk *eth.GenericSignedBeaconBlock) error {
switch r.URL.Query().Get(broadcastValidationQueryParam) {
case broadcastValidationConsensus:
b, err := blocks.NewSignedBeaconBlock(blk.Block)
if err != nil {
return errors.Wrapf(err, "could not create signed beacon block")
}
if err = bs.validateConsensus(r.Context(), b); err != nil {
return errors.Wrap(err, "consensus validation failed")
}
case broadcastValidationConsensusAndEquivocation:
b, err := blocks.NewSignedBeaconBlock(blk.Block)
if err != nil {
return errors.Wrapf(err, "could not create signed beacon block")
}
if err = bs.validateConsensus(r.Context(), b); err != nil {
return errors.Wrap(err, "consensus validation failed")
}
if err = bs.validateEquivocation(b.Block()); err != nil {
return errors.Wrap(err, "equivocation validation failed")
}
default:
return nil
}
return nil
}
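Only the two recognized query values trigger extra work; an absent or unknown value falls through to the default broadcast path. An illustrative use against the handler's request type (bs and blk assumed):

// Sketch: opting into consensus validation replays the state transition
// against the parent state before the block is broadcast.
req := httptest.NewRequest(http.MethodPost, "/eth/v2/beacon/blocks?broadcast_validation=consensus", nil)
err := bs.validateBroadcast(req, blk)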
func (bs *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) error {
parentRoot := blk.Block().ParentRoot()
parentState, err := bs.Stater.State(ctx, parentRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent state")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
return nil
}
func (bs *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
if bs.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
return fmt.Errorf("block for slot %d already exists in fork choice", blk.Slot())
}
return nil
}
func (bs *Server) checkSync(ctx context.Context, w http.ResponseWriter) bool {
isSyncing, syncDetails, err := helpers.ValidateSyncHTTP(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not check if node is syncing: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return false
}
if isSyncing {
msg := "Beacon node is currently syncing and not serving request on that endpoint"
details, err := json.Marshal(syncDetails)
if err == nil {
msg += " Details: " + string(details)
}
errJson := &network.DefaultErrorJson{
Message: msg,
Code: http.StatusServiceUnavailable,
}
network.WriteError(w, errJson)
return false
}
return true
}
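Every handler in this file short-circuits through checkSync first, so while the node is syncing a client sees HTTP 503 with the DefaultErrorJson body rather than a partially served request. An illustrative client-side check (url and body assumed):

// Treat 503 from these endpoints as "node still syncing"; back off and retry.
resp, err := http.Post(url, "application/json", body)
if err == nil && resp.StatusCode == http.StatusServiceUnavailable {
	// Beacon node is syncing; retry later.
}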

Some files were not shown because too many files have changed in this diff.