Compare commits

...

54 Commits

Author SHA1 Message Date
terence tsao
e2422647d0 More errors. Still not done 2022-12-12 16:53:25 +08:00
terence tsao
f0ea14158e Merge branch 'develop' of github.com:prysmaticlabs/prysm into dev-payload-interfaces-no-panic 2022-12-12 16:02:04 +08:00
terence tsao
c228a3e0f6 Nishant's feedback 2022-12-12 16:01:27 +08:00
terencechain
09619c388f Remove Test_IsExecutionEnabledCapella (#11752)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-12-11 22:59:12 +00:00
Patrice Vignola
1531313603 Fix DeepSource errors in proposer_bellatrix.go (#11739)
* Fix DeepSource errors in

* Omit receiver name

* Address PR comments

* Remove unused variable

* Fix more DeepSource errors

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-12-10 10:27:07 +00:00
Potuz
babfc66c5b Check BLS changes when requesting from pool (#11718)
* Check BLS changes when requesting from pool

* Terence's suggestions

* Radek's suggestion

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-12-09 14:41:45 +00:00
terencechain
cb9b5e8f6e Use payload attribute type for engine-API (#11719)
* Add payload attribute type

* Gazelle

* Fix test

* Use new payload attribute type

* Fix test

* Fix test

* Update beacon-chain/execution/engine_client.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Feedbacks

* Fix suggestion

* Update argument, fix test

* Return emptyAttri instead of nil

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-12-08 21:39:45 +00:00
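The payload-attribute change above (#11719, building on #11710) replaces raw *enginev1.PayloadAttributes plumbing with a version-aware wrapper. A minimal sketch of how that wrapper is consumed, assuming only the calls visible in the forkchoice diff further down this page (payloadattribute.New, EmptyWithVersion, and the SuggestedFeeRecipient/Withdrawals getters); field values are placeholders and the exact signatures should be checked against the package itself:

```go
// Illustrative only, based on calls shown in the forkchoice diff below.
package main

import (
	"fmt"

	payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
	enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
	"github.com/prysmaticlabs/prysm/v3/runtime/version"
)

func main() {
	// Bellatrix-style attribute; Capella uses enginev1.PayloadAttributesV2,
	// which additionally carries withdrawals.
	attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
		Timestamp:             1670000000,          // placeholder
		PrevRandao:            make([]byte, 32),    // placeholder
		SuggestedFeeRecipient: make([]byte, 20),    // placeholder
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("fee recipient: %#x\n", attr.SuggestedFeeRecipient())

	// When no attribute can be built, callers now return an empty,
	// version-aware value instead of nil.
	_ = payloadattribute.EmptyWithVersion(version.Bellatrix)
}
```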
james-prysm
145eb5a8e4 add m1 chip info (#11736) 2022-12-08 15:20:32 -06:00
Preston Van Loon
7510f584cb beacondb: Remove cache look up and lock request from boltdb transaction for state summaries (#11745)
* Remove cache look up from bolt transaction

* remove bogus line, oops

* Remove independent cache lookup entirely and just use HasStateSummary

* Rm newline
2022-12-08 19:32:04 +00:00
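The state-summary change above (#11745) moves the cache check out of the boltdb transaction so the cache's lock is never requested while the DB transaction is held. A hedged sketch of that pattern using go.etcd.io/bbolt; the cache interface and bucket name below are hypothetical, not Prysm's actual code:

```go
package db

import bolt "go.etcd.io/bbolt"

// summaryCache is a hypothetical stand-in for the in-memory state-summary cache.
type summaryCache interface {
	Has(blockRoot [32]byte) bool
}

// hasStateSummary checks the cache first, outside any DB transaction, and only
// falls back to a read-only bbolt transaction on a cache miss.
func hasStateSummary(store *bolt.DB, cache summaryCache, blockRoot [32]byte) (bool, error) {
	if cache.Has(blockRoot) {
		return true, nil
	}
	var exists bool
	err := store.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket([]byte("state-summary")) // hypothetical bucket name
		exists = bkt != nil && bkt.Get(blockRoot[:]) != nil
		return nil
	})
	return exists, err
}
```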
Patrice Vignola
dbeb3ee886 Onboard validator's Beacon REST API usage to e2e tests (#11704)
* WIP

* WIP

* WIP

* WIP

* WIP

* WIP

* WIP

* Onboard validator's Beacon REST API usage to e2e tests

* Remove unused variables

* Remove use_beacon_api tags

* Fix DeepSource errors

* Revert unneeded changes

* Revert evaluator changes

* Revert import reordering

* Address PR comments

* Remove all REST API e2e tests except minimal one

* Fix validator pointing to nonexistent beacon node port

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-12-08 14:38:56 +00:00
Preston Van Loon
ca2618110f e2e: Print process IDs for debugging. (#11734)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-12-08 04:48:16 +00:00
Nishant Das
642c399b9d Add Back Cross Client Option (#11738)
* add changes

* remove env var

* fix
2022-12-07 22:20:35 -06:00
Manu NALEPA
6eee539425 Add REST implementation for Validator's WaitForActivation (Ethereum Protocol Fellowship) (#11671)
* Implement REST `WaitForActivation`

* Activation: Factorize tests

* Fix PR comments

* `missingPubKeys`: Replace map by slice (no need to have a map here)

* Fix typo

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-12-07 19:20:11 +00:00
james-prysm
bd82eb873c Fix to Post Submit E2E (#11733) 2022-12-07 18:19:44 +01:00
Nishant Das
62455b7bcb Fix Lint and Minor Bugs in E2E (#11730) 2022-12-07 05:31:53 +00:00
Preston Van Loon
3d6d0a12dd Update go to 1.19.4 (#11727)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-12-06 21:20:25 +00:00
Patrice Vignola
05148dbc8f Fix DeepSource errors in the Validator's REST API (#11726) 2022-12-06 20:31:56 +00:00
james-prysm
19af1d2bb0 E2E: beacon APIs Part 1 (#11306)
* adding compare beacon block test

* fixing bazel

* fixing evaluator import

* fixing imports

* changing package name

* fixing bazel

* adding logic to check for checking epoch

* fixing linting

* adding check for attester duties

* handle both blockv1 and blockv2

* making middleware objects public instead

* adding test for block attestations

* fixing typo

* adding blockroot test

* adding test for attestations

* fixing type value

* fixing test

* adding in node endpoints

* fixing bazel

* updating web3signer

* printing beacon blocks on request

* fixing struct

* temp log

* forgot string cast

* adding comparison function

* fixing bazel and evaluators, WIP

* fixing bazel

* changing how to minify json

* trying multiclient

* fixing port problem

* reverting evaluator and making test only for mainnet scenario testing

* removing test data

* fixing linting unused functions
git push

* changed to reflect

* adding in ssz comparison

* fixing tests

* fixing conflict

* fixing tests

* making v2 the standard

* adding better error logging

* fixing type

* adding lighthouse settings and fixing some deepsource items

* testing adding delay to evaluator

* testing without peers check

* changing target peers to try to fix lighthouse peer connections

* temp removing other tests

* fix lint issue

* adding peers connect back in

* adding in state version

* fixing bazel

* fixing path error

* testing changes to state

* fix unmarshal

* simplifying beacon api e2e execution

* fixing missed assertion checks

* improve logging and debugging issue

* trying to fix unmarshal

* still breaking more test edits

* removing fork to test unmarshal

* fixing pathing

* resolving error

* fixing beacon_api

* merging in debug api to beacon_api test

* fixing lint and temp commenting out endpoint

* adding in custom comparison function

* fixing custom evaluator

* adding test for block header data

* fixing header evaluation

* add node apis

* fixing linting,adding tests

* fixing bazel and temp removing unused functions

* fixing deepsource and linting issues

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing review

* resolving more review comments

* fixing linting

* removing ssz return value as it's large and possibly not needed

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing more review comments

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fixing linting and review items

* fixing cognitive complexity issue

* fixing linting

* fix log printout

* test build kite only with crossclient

* switching out evaluator to depositedvalidatorsareactive

* removed wrong evaluator switching correct one

* removing skip based on review comments

* fixing pathing issue

* test without participation at epoch

* testing without special lighthouse logic in evaluator

* reducing expected participation when multiclient

* fixing imports

* reducing epochs to see if less flaky

* testing with other tests added back in

* reducing epochs ran further

* testing only cross client again

* testing multi run again

* test reverted scenario for tests

* testing with cross client

* removing commented out function

* testing without peers connect

* adding optimization based on suggestions

* removed the wrong peers connect

* accidentally committed something I shouldn't have

* fixing lighthouse flag

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/endtoend/evaluators/beaconapi_evaluators/beacon_api.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-12-06 16:01:17 +00:00
Nishant Das
faf16f9e56 Allow Nodes Running Via VPNs To Make Successful Dials (#11599)
* fix it

* fix dialer for now

* fix build

* fix test

* fix to v0.24.0

* fix gaz

* fix build

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-12-06 14:54:45 +00:00
Manu NALEPA
0a5c65e29c Add REST implementation for Validator's ValidatorIndex (#11712)
* Add GetAttestationData

* Add tests

* Add many more tests and refactor

* Fix logic

* Address PR comments

* Address PR comments

* Add jsonRestHandler and decouple http logic from rest of the code

* Add buildURL tests

* Remove handlers_test.go

* Improve tests

* Implement `ValidatorIndex` of `beaconApiValidatorClient` using Beacon API

* Implement getStateValidators

* `validatorIndex`: Use `getStateValidators`

Co-authored-by: Patrice Vignola <vignola.patrice@gmail.com>
2022-12-06 12:27:26 +00:00
Radosław Kapka
7dc966bb3b Update state Beacon APIs to Capella (#11708)
* proto

(cherry picked from commit 24f45e021061782ab4d6c101a95368310aad67b6)

* implementation

(cherry picked from commit bbfa22c2053e8176fc004b13ba9c8d62cc3bd352)

# Conflicts:
#	beacon-chain/rpc/apimiddleware/structs.go

* fix compilation error

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-12-06 00:36:23 +00:00
Radosław Kapka
51d35f36f0 Disallow computing committee assignments for old slots (#11722) 2022-12-05 23:37:08 +01:00
Patrice Vignola
943a0556e9 Add REST implementation for Validator's DomainData (#11711)
* Add REST implementation for Validator's DomainData

* Add missing dependency

* Fix getForkVersion logic

* Remove unused helpers

* Fix deepsource error

* Fix deepsource error

* Address PR comments

* Remove outdated comment

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-12-05 10:27:41 +00:00
terencechain
f7cecf9f8a Add payload attribute type (#11710)
* Add payload attribute type

* Gazelle

* Fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-12-02 18:23:47 +00:00
Nishant Das
8be73a52b1 Add State Check For BLS Execution Change Messages (#11716) 2022-12-02 11:13:21 +00:00
Potuz
bebceb3bfa Handle BLS to execution changes included in blocks (#11713)
* Handle BLS to execution changes included in blocks

* log

* review
2022-12-02 14:04:45 +08:00
Mart1i1n
d541010bf1 Fix Typo (#11670)
* Update ffg_update_test.go

Fix some alignment typos.

* Update justification_finalization.go

Fix typo.

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2022-11-30 22:25:55 +00:00
terencechain
8cb07e0c2b Spec test: capella and update to v1.3.0-alpha.1 (#11683) 2022-11-30 12:08:04 -08:00
Preston Van Loon
aa2bf0c9c4 Spectests: ensure test directories are not empty (#11709)
* Add an assertion that test folders are not empty

* more assertions

* only run sync tests on bellatrix or later
2022-11-30 17:32:10 +00:00
Ye Ding
e49d8f2162 Fix a race condition during initialization (#11444) (#11698)
* Fix a race condition during initialization (#11444)

* Fix tests

* Add more test cases

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-11-29 18:24:50 +00:00
shana
c6ed4e2089 Do not omit json fields if empty in builder client (#11673)
* Do not omit json fields if empty in builder client

* fix tests

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2022-11-29 12:12:13 +00:00
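For context on #11673 above: with `omitempty` in the struct tags, zero-valued but required fields are silently dropped from the marshaled JSON that the builder API expects, which is why the tags are removed throughout the builder client types in the diff below. A self-contained illustration using simplified stand-in types (not the real builder structs):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withOmit struct {
	GasLimit string `json:"gas_limit,omitempty"`
}

type withoutOmit struct {
	GasLimit string `json:"gas_limit"`
}

func main() {
	a, _ := json.Marshal(withOmit{})    // {} — the empty field vanishes
	b, _ := json.Marshal(withoutOmit{}) // {"gas_limit":""} — field always present
	fmt.Println(string(a), string(b))
}
```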
int88
0ad902e47d fix help message of metric doublylinkedtree_node_count (#11705) 2022-11-29 11:29:48 +00:00
Preston Van Loon
4f4775f9f9 bazel: Update rules_go and remove extra repos in WORKSPACE (#11703)
* Update rule_go to latest release, remove fuzzit stuff

* Delete another thing
2022-11-28 23:00:11 +00:00
terencechain
679e6bc54a Cont FCU if get payload attribute fails (#11693)
* Cont FCU if get payload attribute fails

* Fix err position

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2022-11-28 19:42:32 +00:00
Radosław Kapka
c7a3cf8563 GetForkChoice API endpoint (#11680)
* proto

* middleware

* change structure

* fix all issues

* test

* validity field
2022-11-28 19:17:53 +00:00
Radosław Kapka
6c3b75f908 Upgrade getBlindedBlock API endpoint to Capella (#11687)
* proto

(cherry picked from commit 7101910e0fab5a5572795115679fd6f8d8c8379b)

* GetBlindedBlock

(cherry picked from commit e5c269ddf7b0c9e04f72ed28982a82de56fcac55)

* middleware

(cherry picked from commit 1719ce5967b0f74786c596cc921f7256e6b224f3)

* refactor

* Update beacon-chain/rpc/apimiddleware/structs.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* update error message

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
2022-11-28 18:17:05 +00:00
Patrice Vignola
f276c5d006 Make the validator REST API's WaitForChainStart blocking (#11695) 2022-11-28 11:58:04 +01:00
Potuz
29953cb734 Refactor Sync Committee Rewards (#11696) 2022-11-27 11:06:32 -08:00
Nishant Das
a23a5052bc Add Gossip Handler For BLS To Execution Changes (#11690) 2022-11-26 11:07:05 -08:00
terence tsao
52d48b05f7 Started replacing panics with errors 2022-11-25 08:10:08 -08:00
terencechain
da7876a88a Merge branch 'develop' into inphi/dev-payload-interface 2022-11-25 07:35:08 -08:00
Potuz
f9e0d4b13a Batch capella signatures with the rest of the block (#11689) 2022-11-25 09:45:36 +08:00
Potuz
0aaee51973 Process bls changes (#11684)
* Implement ProcessBLSToExecutionChanges

* Batch process signatures

* gaz

* Change runtime behavior

* Terence's review
2022-11-24 19:36:12 +00:00
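The Capella work above (#11684, with the gossip handler in #11690 and the state check in #11716) processes BLS-to-execution credential changes. A spec-level sketch of what handling a single change entails, written against the consensus spec rather than Prysm's internal types; all types below are simplified stand-ins, and signature verification over DOMAIN_BLS_TO_EXECUTION_CHANGE is elided:

```go
package capella

import (
	"bytes"
	"errors"
)

const (
	blsWithdrawalPrefix  = byte(0x00)
	eth1WithdrawalPrefix = byte(0x01)
)

type validator struct {
	WithdrawalCredentials []byte // 32 bytes
}

type blsToExecutionChange struct {
	ValidatorIndex     uint64
	FromBLSPubkeyHash  [32]byte // hash(from_bls_pubkey), precomputed by the caller
	ToExecutionAddress []byte   // 20 bytes
}

// processBLSToExecutionChange mirrors the spec checks: the existing
// credentials must be BLS-style and must commit to the advertised pubkey;
// they are then rewritten to ETH1-address-style credentials.
func processBLSToExecutionChange(vals []*validator, c *blsToExecutionChange) error {
	if c.ValidatorIndex >= uint64(len(vals)) {
		return errors.New("validator index out of range")
	}
	v := vals[c.ValidatorIndex]
	if len(v.WithdrawalCredentials) != 32 || v.WithdrawalCredentials[0] != blsWithdrawalPrefix {
		return errors.New("withdrawal credentials are not BLS-style")
	}
	if !bytes.Equal(v.WithdrawalCredentials[1:], c.FromBLSPubkeyHash[1:]) {
		return errors.New("credentials do not match from_bls_pubkey")
	}
	newCreds := make([]byte, 32)
	newCreds[0] = eth1WithdrawalPrefix
	copy(newCreds[12:], c.ToExecutionAddress) // 11 zero bytes, then the address
	v.WithdrawalCredentials = newCreds
	return nil
}
```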
terencechain
a0c0706224 Add Capella state changes (#11688)
* Add Capella state changes

* Use params.configs
2022-11-24 14:54:55 -03:00
Potuz
a525fad0ea Add upgrade to Capella to statereplay (#11686) 2022-11-24 14:58:05 +00:00
Patrice Vignola
7ab5851c54 Add a gRPC fallback mode to the validator Beacon REST API (#11679)
* Add a gRPC fallback mode to the validator Beacon REST API

* Remove --beacon_api_grpc_fallback flag

* Add missing bazel dependency

* Reorder dependency per gazelle check

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-11-24 11:09:07 +00:00
Patrice Vignola
e231cfd59d Onboard validator's beacon REST API tests to CI (#11682)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-11-24 11:50:13 +01:00
kasey
0a41b957dc env var to control log path with unique paths (#11681)
due to flaky test re-run behavior, logs from a failed test run are
overwritten by subsequent retries. This makes it difficult to retrieve
logs after the first failed run. It also takes some squinting through
output to find the location of the log file in the first place. This
flag enables logs to be placed in an arbitrary path. Note that bazel
sandboxing generally will force this path to be in the /tmp tree.

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-11-23 22:56:40 +00:00
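For #11681 above, the commit message describes the goal but the variable name is not shown on this page. A hedged sketch of the idea; E2E_LOG_PATH is a placeholder, not necessarily the real name:

```go
package e2e

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// logDir returns the directory test logs should be written to. If the
// override variable is set, logs go under that path instead of the default;
// either way each run gets a unique sub-directory so flaky-test retries do
// not overwrite the logs from earlier attempts.
func logDir(defaultDir string) string {
	dir := defaultDir
	if override := os.Getenv("E2E_LOG_PATH"); override != "" { // placeholder variable name
		dir = override
	}
	return filepath.Join(dir, fmt.Sprintf("run-%d", time.Now().UnixNano()))
}
```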
Radosław Kapka
f2399e21e1 GetLiveness API endpoint (#11617)
* proto

* initial version

* middleware + tests

* change request structure

* fix all issues

* review feedback

* simplify out-of-range check
2022-11-23 18:23:22 +00:00
kasey
395e49972e prysmctl support generating non-phase0 genesis.ssz (#11677)
* support generating non-phase0 genesis.ssz

* make default (Value) work for EnumValue + lint

* remove messy punctuation

* Ran gazelle for @kasey

* Fix deps viz

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: prestonvanloon <preston@prysmaticlabs.com>
2022-11-23 14:22:24 +00:00
inphi
1863a16c25 fix test for fuzzing 2022-11-21 15:29:13 -05:00
inphi
99dba6d51b Fix spectest build errors 2022-11-21 14:52:55 -05:00
inphi
28c5d6d2cb Merge remote-tracking branch 'origin/develop' into inphi/dev-payload-interface 2022-11-21 14:40:15 -05:00
inphi
eeef4f473c Opaque payload header interface in beacon state
fixes #11604
2022-11-16 13:11:42 -05:00
456 changed files with 18123 additions and 4113 deletions


@@ -230,9 +230,3 @@ build --modify_execution_info='GoStdlib.*=+no-remote-cache'
# Set bazel gotag
build --define gotags=bazel
# Build the binary with Beacon API calls for the validator
build --flag_alias=use_beacon_api=//validator/client/validator-client-factory:use_beacon_api
build:beacon_api --use_beacon_api
build:beacon_api --define=gotags=use_beacon_api

.gitignore

@@ -38,3 +38,6 @@ metaData
# execution API authentication
jwt.hex
# manual testing
tmp


@@ -88,10 +88,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
sha256 = "ae013bf35bd23234d1dea46b079f1e05ba74ac0321423830119d3e787ec73483",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.36.0/rules_go-v0.36.0.zip",
],
)
@@ -110,13 +110,6 @@ git_repository(
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
)
http_archive(
name = "fuzzit_linux",
build_file_content = "exports_files([\"fuzzit\"])",
sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774",
urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
)
load(
"@io_bazel_rules_docker//repositories:repositories.bzl",
container_repositories = "repositories",
@@ -164,35 +157,15 @@ container_pull(
repository = "pinglamb/alpine-glibc",
)
container_pull(
name = "fuzzit_base",
digest = "sha256:24a39a4360b07b8f0121eb55674a2e757ab09f0baff5569332fefd227ee4338f",
registry = "gcr.io",
repository = "fuzzit-public/stretch-llvm8",
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
go_rules_dependencies()
go_register_toolchains(
go_version = "1.19.3",
go_version = "1.19.4",
nogo = "@//:nogo",
)
http_archive(
name = "prysm_testnet_site",
build_file_content = """
proto_library(
name = "faucet_proto",
srcs = ["src/proto/faucet.proto"],
visibility = ["//visibility:public"],
)""",
sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
)
http_archive(
name = "io_kubernetes_build",
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -215,7 +188,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.2.0"
consensus_spec_version = "v1.3.0-alpha.1"
bls_test_version = "v0.1.1"
@@ -231,7 +204,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "eded065f923a99b78372d6f748c9b3f1de8229f8f574c1fec9c5fe76c8affb65",
sha256 = "b5a65eb5ecef1c4fca82ff29739936fee019e8a529ef392ea5e46aa39f40a0b2",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -247,7 +220,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "2ed83783129e93360f4bf9d5d5f606ee28adbe8b458acdfac61b8d99218d16a9",
sha256 = "b381bb0184e69cb17d05fbbe75f48c6aec7726957d073e3a65c26671d5d27d37",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -263,7 +236,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "f5eff2adac78c99a4180491f373328465263caa2cba0206308a7c598abf76cda",
sha256 = "9466f2a5a2dea039a2deb953f0b5dce5399400028bf3f218ffef03f8ef9c446c",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -278,7 +251,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "f1a33b7459391716defa4c2b6f0c1bd7ccc38471ce9126d752d3bad767bebf2b",
sha256 = "3cc3141651a320a1f5767d15826e85aaa96eb4459d9e1a1d3f5a0cdbc79b8f56",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)


@@ -81,7 +81,7 @@ func TestClient_Status(t *testing.T) {
func TestClient_RegisterValidator(t *testing.T) {
ctx := context.Background()
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
expectedPath := "/eth/v1/builder/validators"
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -111,6 +111,7 @@ func TestClient_RegisterValidator(t *testing.T) {
Timestamp: 42,
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
},
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
}
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
}


@@ -23,8 +23,8 @@ type ValidatorRegistration struct {
func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &ValidatorRegistration{r.Message},
Signature: r.SignedValidatorRegistrationV1.Signature,
@@ -36,8 +36,8 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
}
o := struct {
Message *ValidatorRegistration `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
Message *ValidatorRegistration `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
@@ -49,10 +49,10 @@ func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
}{
FeeRecipient: r.FeeRecipient,
GasLimit: fmt.Sprintf("%d", r.GasLimit),
@@ -66,10 +66,10 @@ func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
}
o := struct {
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
GasLimit string `json:"gas_limit,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
Timestamp string `json:"timestamp"`
Pubkey hexutil.Bytes `json:"pubkey"`
}{}
if err := json.Unmarshal(b, &o); err != nil {
return err
@@ -183,11 +183,11 @@ func (s Uint64String) MarshalText() ([]byte, error) {
}
type ExecHeaderResponse struct {
Version string `json:"version,omitempty"`
Version string `json:"version"`
Data struct {
Signature hexutil.Bytes `json:"signature,omitempty"`
Message *BuilderBid `json:"message,omitempty"`
} `json:"data,omitempty"`
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBid `json:"message"`
} `json:"data"`
}
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
@@ -233,26 +233,26 @@ func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
}
type BuilderBid struct {
Header *ExecutionPayloadHeader `json:"header,omitempty"`
Value Uint256 `json:"value,omitempty"`
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
Header *ExecutionPayloadHeader `json:"header"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
type ExecutionPayloadHeader struct {
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
BlockNumber Uint64String `json:"block_number,omitempty"`
GasLimit Uint64String `json:"gas_limit,omitempty"`
GasUsed Uint64String `json:"gas_used,omitempty"`
Timestamp Uint64String `json:"timestamp,omitempty"`
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
*v1.ExecutionPayloadHeader
}
@@ -294,25 +294,25 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
}
type ExecPayloadResponse struct {
Version string `json:"version,omitempty"`
Data ExecutionPayload `json:"data,omitempty"`
Version string `json:"version"`
Data ExecutionPayload `json:"data"`
}
type ExecutionPayload struct {
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
BlockNumber Uint64String `json:"block_number,omitempty"`
GasLimit Uint64String `json:"gas_limit,omitempty"`
GasUsed Uint64String `json:"gas_used,omitempty"`
Timestamp Uint64String `json:"timestamp,omitempty"`
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
Transactions []hexutil.Bytes `json:"transactions,omitempty"`
ParentHash hexutil.Bytes `json:"parent_hash"`
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
StateRoot hexutil.Bytes `json:"state_root"`
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
LogsBloom hexutil.Bytes `json:"logs_bloom"`
PrevRandao hexutil.Bytes `json:"prev_randao"`
BlockNumber Uint64String `json:"block_number"`
GasLimit Uint64String `json:"gas_limit"`
GasUsed Uint64String `json:"gas_used"`
Timestamp Uint64String `json:"timestamp"`
ExtraData hexutil.Bytes `json:"extra_data"`
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
BlockHash hexutil.Bytes `json:"block_hash"`
Transactions []hexutil.Bytes `json:"transactions"`
}
func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
@@ -356,8 +356,8 @@ type BlindedBeaconBlockBodyBellatrix struct {
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *BlindedBeaconBlockBellatrix `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
Message *BlindedBeaconBlockBellatrix `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
@@ -367,10 +367,10 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index,omitempty"`
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body,omitempty"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
}{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
@@ -386,8 +386,8 @@ type ProposerSlashing struct {
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1,omitempty"`
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2,omitempty"`
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2"`
}{
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
@@ -400,8 +400,8 @@ type SignedBeaconBlockHeader struct {
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Header *BeaconBlockHeader `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
Header *BeaconBlockHeader `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
Signature: h.SignedBeaconBlockHeader.Signature,
@@ -414,11 +414,11 @@ type BeaconBlockHeader struct {
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot,omitempty"`
ProposerIndex string `json:"proposer_index,omitempty"`
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
BodyRoot hexutil.Bytes `json:"body_root,omitempty"`
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot hexutil.Bytes `json:"parent_root"`
StateRoot hexutil.Bytes `json:"state_root"`
BodyRoot hexutil.Bytes `json:"body_root"`
}{
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
@@ -438,9 +438,9 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
}
return json.Marshal(struct {
AttestingIndices []string `json:"attesting_indices,omitempty"`
Data *AttestationData `json:"data,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
AttestingIndices []string `json:"attesting_indices"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature"`
}{
AttestingIndices: indices,
Data: &AttestationData{a.IndexedAttestation.Data},
@@ -454,8 +454,8 @@ type AttesterSlashing struct {
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Attestation1 *IndexedAttestation `json:"attestation_1,omitempty"`
Attestation2 *IndexedAttestation `json:"attestation_2,omitempty"`
Attestation1 *IndexedAttestation `json:"attestation_1"`
Attestation2 *IndexedAttestation `json:"attestation_2"`
}{
Attestation1: &IndexedAttestation{s.Attestation_1},
Attestation2: &IndexedAttestation{s.Attestation_2},
@@ -468,8 +468,8 @@ type Checkpoint struct {
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch,omitempty"`
Root hexutil.Bytes `json:"root,omitempty"`
Epoch string `json:"epoch"`
Root hexutil.Bytes `json:"root"`
}{
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
Root: c.Checkpoint.Root,
@@ -482,11 +482,11 @@ type AttestationData struct {
func (a *AttestationData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Slot string `json:"slot,omitempty"`
Index string `json:"index,omitempty"`
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root,omitempty"`
Source *Checkpoint `json:"source,omitempty"`
Target *Checkpoint `json:"target,omitempty"`
Slot string `json:"slot"`
Index string `json:"index"`
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root"`
Source *Checkpoint `json:"source"`
Target *Checkpoint `json:"target"`
}{
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
@@ -502,9 +502,9 @@ type Attestation struct {
func (a *Attestation) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
AggregationBits hexutil.Bytes `json:"aggregation_bits,omitempty"`
Data *AttestationData `json:"data,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty" ssz-size:"96"`
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
Data *AttestationData `json:"data"`
Signature hexutil.Bytes `json:"signature" ssz-size:"96"`
}{
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
Data: &AttestationData{a.Attestation.Data},
@@ -518,10 +518,10 @@ type DepositData struct {
func (d *DepositData) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
PublicKey hexutil.Bytes `json:"pubkey,omitempty"`
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials,omitempty"`
Amount string `json:"amount,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
PublicKey hexutil.Bytes `json:"pubkey"`
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature hexutil.Bytes `json:"signature"`
}{
PublicKey: d.PublicKey,
WithdrawalCredentials: d.WithdrawalCredentials,
@@ -554,8 +554,8 @@ type SignedVoluntaryExit struct {
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Message *VoluntaryExit `json:"message,omitempty"`
Signature hexutil.Bytes `json:"signature,omitempty"`
Message *VoluntaryExit `json:"message"`
Signature hexutil.Bytes `json:"signature"`
}{
Signature: sve.SignedVoluntaryExit.Signature,
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
@@ -568,8 +568,8 @@ type VoluntaryExit struct {
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Epoch string `json:"epoch,omitempty"`
ValidatorIndex string `json:"validator_index,omitempty"`
Epoch string `json:"epoch"`
ValidatorIndex string `json:"validator_index"`
}{
Epoch: fmt.Sprintf("%d", ve.Epoch),
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
@@ -582,8 +582,8 @@ type SyncAggregate struct {
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits,omitempty"`
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature,omitempty"`
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature"`
}{
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
@@ -596,9 +596,9 @@ type Eth1Data struct {
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
DepositRoot hexutil.Bytes `json:"deposit_root,omitempty"`
DepositCount string `json:"deposit_count,omitempty"`
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
DepositRoot hexutil.Bytes `json:"deposit_root"`
DepositCount string `json:"deposit_count"`
BlockHash hexutil.Bytes `json:"block_hash"`
}{
DepositRoot: e.DepositRoot,
DepositCount: fmt.Sprintf("%d", e.DepositCount),
@@ -628,16 +628,16 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
}
return json.Marshal(struct {
RandaoReveal hexutil.Bytes `json:"randao_reveal,omitempty"`
Eth1Data *Eth1Data `json:"eth1_data,omitempty"`
Graffiti hexutil.Bytes `json:"graffiti,omitempty"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings,omitempty"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings,omitempty"`
Attestations []*Attestation `json:"attestations,omitempty"`
Deposits []*Deposit `json:"deposits,omitempty"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits,omitempty"`
SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header,omitempty"`
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti hexutil.Bytes `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
Attestations []*Attestation `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
}{
RandaoReveal: b.RandaoReveal,
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},


@@ -53,6 +53,7 @@ go_library(
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -63,6 +64,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",


@@ -181,7 +181,7 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
if ok {
return s.headState(ctx), nil
return s.headState(ctx)
}
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
@@ -195,7 +195,11 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch)
if !s.hasHeadState() {
return []types.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
hs, err := s.headState(ctx)
if err != nil {
return nil, err
}
return helpers.ActiveValidatorIndices(ctx, hs, epoch)
}
// HeadGenesisValidatorsRoot returns genesis validators root of the head state.


@@ -198,7 +198,11 @@ func TestHeadState_CanRetrieve(t *testing.T) {
c.head = &head{state: s}
headState, err := c.HeadState(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
s1, err := headState.ToProtoUnsafe()
require.NoError(t, err)
s2, err := s.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepEqual(t, s1, s2, "Incorrect head state received")
}
func TestGenesisTime_CanRetrieve(t *testing.T) {


@@ -19,8 +19,6 @@ var (
errInvalidNilSummary = errors.New("nil summary returned from the DB")
// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
errWrongBlockCount = errors.New("wrong number of blocks or block roots")
// block is not a valid optimistic candidate block
errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
// errNilStateFromStategen is returned when a nil state is returned from the state generator.


@@ -15,9 +15,11 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -67,11 +69,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
if err != nil {
log.WithError(err).Error("Could not get head payload attribute")
return nil, nil
}
hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if err != nil {
@@ -150,7 +148,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
if hasAttr && payloadID != nil { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
// If the forkchoice update call has an attribute, update the proposer payload ID cache.
if hasAttr && payloadID != nil {
var pId [8]byte
copy(pId[:], payloadID[:])
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
@@ -183,7 +182,7 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) (bool, error) {
postStateHeader interfaces.ExecutionDataHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()
@@ -251,22 +250,29 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, payloadattribute.Attributer, types.ValidatorIndex) {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
// Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
if !ok { // There's no need to build attribute if there is no proposer for slot.
return false, nil, 0, nil
return false, emptyAttri, 0
}
// Get previous randao.
st = st.Copy()
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
st, err := st.Copy()
if err != nil {
return false, nil, 0, err
log.WithError(err).Error("Could not copy state")
return false, emptyAttri, 0
}
st, err = transition.ProcessSlotsIfPossible(ctx, st, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri, 0
}
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return false, nil, 0, nil
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return false, emptyAttri, 0
}
// Get fee recipient.
@@ -284,7 +290,8 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
"Please refer to our documentation for instructions")
}
case err != nil:
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
log.WithError(err).Error("Could not get fee recipient to get payload attribute")
return false, emptyAttri, 0
default:
feeRecipient = recipient
}
@@ -292,14 +299,44 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
return false, nil, 0, err
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return false, emptyAttri, 0
}
attr := &enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
var attr payloadattribute.Attributer
switch st.Version() {
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return false, emptyAttri, 0
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
Withdrawals: withdrawals,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: feeRecipient.Bytes(),
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri, 0
}
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return false, emptyAttri, 0
}
return true, attr, proposerID, nil
return true, attr, proposerID
}
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.


@@ -15,6 +15,7 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
bstate "github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
@@ -30,6 +31,69 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)
func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
require.NoError(t, err)
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
require.NoError(t, err)
fcs := doublylinkedtree.New()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
require.NoError(t, err)
pid := &v1.PayloadIDBytes{1}
service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{PayloadIDBytes: pid}
st, _ = util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, bellatrixBlkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
// Intentionally generate a bad state such that `hash_tree_root` fails during `process_slot`
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
arg := &notifyForkchoiceUpdateArg{
headState: s,
headRoot: [32]byte{},
headBlock: b,
}
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(1, 0, [8]byte{}, [32]byte{})
got, err := service.notifyForkchoiceUpdate(ctx, arg)
require.NoError(t, err)
require.DeepEqual(t, got, pid) // We still get a payload ID even though the state is bad. This means it returns until the end.
}
func Test_NotifyForkchoiceUpdate(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
@@ -47,11 +111,12 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisState(t, 10)
service.head = &head{
state: st,
}
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
@@ -727,8 +792,8 @@ func Test_GetPayloadAttribute(t *testing.T) {
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
require.Equal(t, false, hasPayload)
require.Equal(t, types.ValidatorIndex(0), vId)
@@ -736,24 +801,65 @@ func Test_GetPayloadAttribute(t *testing.T) {
suggestedVid := types.ValidatorIndex(1)
slot := types.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
st, _ := util.DeterministicGenesisState(t, 1)
hook := logTest.NewGlobal()
hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
require.NoError(t, err)
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}
func Test_GetPayloadAttributeV2(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, doublylinkedtree.New())),
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
}
// Cache miss
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateCapella(t, 1)
hasPayload, _, vId := service.getPayloadAttribute(ctx, st, 0)
require.Equal(t, false, hasPayload)
require.Equal(t, types.ValidatorIndex(0), vId)
// Cache hit, advance state, no fee recipient
suggestedVid := types.ValidatorIndex(1)
slot := types.Slot(1)
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hook := logTest.NewGlobal()
hasPayload, attr, vId := service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
require.LogsContain(t, hook, "Fee recipient is currently using the burn address")
a, err := attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{}, [32]byte{})
hasPayload, attr, vId = service.getPayloadAttribute(ctx, st, slot)
require.Equal(t, true, hasPayload)
require.Equal(t, suggestedVid, vId)
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
require.Equal(t, 0, len(a))
}
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {


@@ -201,11 +201,15 @@ func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, sta
if err != nil {
return err
}
copiedState, err := state.Copy()
if err != nil {
return err
}
s.head = &head{
slot: block.Block().Slot(),
root: root,
block: bCp,
state: state.Copy(),
state: copiedState,
}
return nil
}
@@ -258,7 +262,7 @@ func (s *Service) headBlock() (interfaces.SignedBeaconBlock, error) {
// This returns the head state.
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) state.BeaconState {
func (s *Service) headState(ctx context.Context) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()


@@ -89,7 +89,13 @@ func TestSaveHead_Different(t *testing.T) {
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
headStateProto, err := headState.ToProto()
require.NoError(t, err)
hs, err := service.headState(ctx)
require.NoError(t, err)
serviceHeadStateProto, err := hs.ToProto()
require.NoError(t, err)
assert.DeepSSZEqual(t, headStateProto, serviceHeadStateProto, "Head did not change")
}
func TestSaveHead_Different_Reorg(t *testing.T) {
@@ -147,7 +153,13 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
pb, err := headBlock.Proto()
require.NoError(t, err)
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
headStateProto, err := headState.ToProto()
require.NoError(t, err)
hs, err := service.headState(ctx)
require.NoError(t, err)
serviceHeadStateProto, err := hs.ToProto()
require.NoError(t, err)
assert.DeepSSZEqual(t, headStateProto, serviceHeadStateProto, "Head did not change")
require.LogsContain(t, hook, "Chain reorg occurred")
require.LogsContain(t, hook, "distance=1")
require.LogsContain(t, hook, "depth=1")
@@ -501,7 +513,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
require.NoError(t, beaconDB.SaveState(ctx, bellatrixState, bellatrixBlkRoot))
service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState)
require.NoError(t, service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState))
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)

View File

@@ -117,12 +117,24 @@ func logPayload(block interfaces.BeaconBlock) error {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
log.WithFields(logrus.Fields{
fields := logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber,
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}).Debug("Synced new payload")
}
if block.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals")
}
fields["withdrawals"] = len(withdrawals)
changes, err := block.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
fields["blsToExecutionChanges"] = len(changes)
}
log.WithFields(fields).Debug("Synced new payload")
return nil
}

View File

@@ -328,7 +328,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
if err != nil {
return err
}
case version.Altair, version.Bellatrix:
case version.Altair, version.Bellatrix, version.Capella:
v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
if err != nil {
return err
@@ -338,7 +338,11 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
return err
}
default:
return errors.Errorf("invalid state type provided: %T", headState.ToProtoUnsafe())
st, err := headState.ToProtoUnsafe()
if err != nil {
return err
}
return errors.Errorf("invalid state type provided: %T", st)
}
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))

View File

@@ -9,6 +9,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
@@ -99,6 +100,14 @@ func WithSlashingPool(p slashings.PoolManager) Option {
}
}
// WithBLSToExecPool to keep track of BLS to Execution address changes.
func WithBLSToExecPool(p blstoexec.PoolManager) Option {
return func(s *Service) error {
s.cfg.BLSToExecPool = p
return nil
}
}
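Wiring sketch for the new option, mirroring the service setup in the test further down in this diff (the other options shown are illustrative, not a required minimum):

pool := blstoexec.NewPool()
opts := []Option{
    WithDatabase(beaconDB),
    WithForkChoiceStore(fc),
    WithStateGen(stategen.New(beaconDB, fc)),
    WithBLSToExecPool(pool),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)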
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
return func(s *Service) error {

View File

@@ -146,7 +146,8 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err := genesisState.Copy()
require.NoError(t, err)
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
@@ -256,7 +257,11 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
require.NoError(t, err)
require.DeepSSZEqual(t, returned.ToProtoUnsafe(), cached.ToProtoUnsafe())
s1, err := returned.ToProtoUnsafe()
require.NoError(t, err)
s2, err := cached.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2)
}
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {

View File

@@ -146,6 +146,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
if err := s.handleBlockBLSToExecChanges(signed.Block()); err != nil {
return errors.Wrap(err, "could not handle block's BLSToExecutionChanges")
}
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -278,11 +282,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
return nil
}
func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionData, error) {
func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionDataHeader, error) {
if st == nil {
return 0, nil, errors.New("nil state")
}
var preStateHeader interfaces.ExecutionData
var preStateHeader interfaces.ExecutionDataHeader
var err error
preStateVersion := st.Version()
switch preStateVersion {
@@ -340,7 +344,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
type versionAndHeader struct {
version int
header interfaces.ExecutionData
header interfaces.ExecutionDataHeader
}
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
@@ -362,7 +366,11 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
}
// Save potential boundary states.
if slots.IsEpochStart(preState.Slot()) {
boundaries[blockRoots[i]] = preState.Copy()
st, err := preState.Copy()
if err != nil {
return err
}
boundaries[blockRoots[i]] = st
}
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
fCheckpoints[i] = preState.FinalizedCheckpoint()
@@ -472,8 +480,11 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
defer span.End()
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
copied, err := postState.Copy()
if err != nil {
return err
}
copied, err = transition.ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}
@@ -555,6 +566,22 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Be
return nil
}
func (s *Service) handleBlockBLSToExecChanges(blk interfaces.BeaconBlock) error {
if blk.Version() < version.Capella {
return nil
}
changes, err := blk.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
for _, change := range changes {
if err := s.cfg.BLSToExecPool.MarkIncluded(change); err != nil {
return errors.Wrap(err, "could not mark BLSToExecutionChange as included")
}
}
return nil
}
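Pool lifecycle sketch grounded in TestHandleBlockBLSToExecutionChanges below: a change sits in the pool until a Capella block that includes it is imported, at which point MarkIncluded removes it so it is not re-proposed.

pool.InsertBLSToExecChange(signedChange)                     // received via gossip or the API
require.Equal(t, true, pool.ValidatorExists(idx))
require.NoError(t, service.handleBlockBLSToExecChanges(blk)) // block containing the change is imported
require.Equal(t, false, pool.ValidatorExists(idx))           // change is dropped from the pool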
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it is the caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
@@ -607,7 +634,7 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}
// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.SignedBeaconBlock) error {
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionDataHeader, blk interfaces.SignedBeaconBlock) error {
// Skip validation if block is older than Bellatrix.
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil
@@ -634,7 +661,7 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
// Skip validation if the block is not a merge transition block.
// To reach here, the payload must be non-empty. If the state header is empty, then it's at transition.
empty, err := consensusblocks.IsEmptyExecutionData(stateHeader)
empty, err := consensusblocks.IsEmptyExecutionDataHeader(stateHeader)
if err != nil {
return err
}
@@ -698,8 +725,12 @@ func (s *Service) fillMissingBlockPayloadId(ctx context.Context, ti time.Time) e
if err != nil {
return err
} else {
hs, err := s.headState(ctx)
if err != nil {
return err
}
if _, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: s.headState(ctx),
headState: hs,
headRoot: s.headRoot(),
headBlock: headBlock.Block(),
}); err != nil {

View File

@@ -26,6 +26,7 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
@@ -65,7 +66,9 @@ func TestStore_OnBlock(t *testing.T) {
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
copiedSt, err := st.Copy()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copiedSt, validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := util.NewBeaconBlock()
@@ -75,11 +78,23 @@ func TestStore_OnBlock(t *testing.T) {
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
copiedSt1, err := st.Copy()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copiedSt1, randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
copiedSt2, err := st.Copy()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copiedSt2, bytesutil.ToBytes32(randomParentRoot2)))
c1, err := st.Copy()
require.NoError(t, err)
c2, err := st.Copy()
require.NoError(t, err)
c3, err := st.Copy()
require.NoError(t, err)
c4, err := st.Copy()
require.NoError(t, err)
tests := []struct {
name string
blk *ethpb.SignedBeaconBlock
@@ -90,7 +105,7 @@ func TestStore_OnBlock(t *testing.T) {
{
name: "parent block root does not have a state",
blk: util.NewBeaconBlock(),
s: st.Copy(),
s: c1,
wantErrString: "could not reconstruct parent state",
},
{
@@ -101,7 +116,7 @@ func TestStore_OnBlock(t *testing.T) {
b.Block.Slot = params.BeaconConfig().FarFutureSlot
return b
}(),
s: st.Copy(),
s: c2,
wantErrString: "is in the far distant future",
},
{
@@ -111,7 +126,7 @@ func TestStore_OnBlock(t *testing.T) {
b.Block.ParentRoot = randomParentRoot[:]
return b
}(),
s: st.Copy(),
s: c3,
wantErrString: "is not a descendant of the current finalized block",
},
{
@@ -122,7 +137,7 @@ func TestStore_OnBlock(t *testing.T) {
b.Block.ParentRoot = randomParentRoot2
return b
}(),
s: st.Copy(),
s: c4,
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
},
}
@@ -156,7 +171,8 @@ func TestStore_OnBlockBatch(t *testing.T) {
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
bState, err := st.Copy()
require.NoError(t, err)
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
@@ -199,7 +215,8 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
bState := st.Copy()
bState, err := st.Copy()
require.NoError(t, err)
var blks []interfaces.SignedBeaconBlock
var blkRoots [][32]byte
@@ -369,7 +386,9 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
copied, err := st.Copy()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, validGenesisRoot))
// Define a tree branch, slot 63 <- 64 <- 65
b63 := util.NewBeaconBlock()
@@ -428,7 +447,9 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
copied, err := st.Copy()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
@@ -522,17 +543,25 @@ func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byt
if err := beaconDB.SaveBlock(context.Background(), wsb); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
copied1, err := st.Copy()
require.NoError(t, err)
if err := beaconDB.SaveState(context.Background(), copied1, bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), r1); err != nil {
copied2, err := st.Copy()
require.NoError(t, err)
if err := beaconDB.SaveState(context.Background(), copied2, r1); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), r7); err != nil {
copied3, err := st.Copy()
require.NoError(t, err)
if err := beaconDB.SaveState(context.Background(), copied3, r7); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), r8); err != nil {
copied4, err := st.Copy()
require.NoError(t, err)
if err := beaconDB.SaveState(context.Background(), copied4, r8); err != nil {
return nil, err
}
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
@@ -823,7 +852,8 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, gs))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
testState := gs.Copy()
testState, err := gs.Copy()
require.NoError(t, err)
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
require.NoError(t, err)
@@ -872,7 +902,8 @@ func TestOnBlock_CanFinalize(t *testing.T) {
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
testState := gs.Copy()
testState, err := gs.Copy()
require.NoError(t, err)
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
require.NoError(t, err)
@@ -974,7 +1005,8 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
testState := gs.Copy()
testState, err := gs.Copy()
require.NoError(t, err)
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := util.GenerateFullBlock(testState, keys, util.DefaultBlockGenConfig(), i)
require.NoError(t, err)
@@ -999,7 +1031,8 @@ func TestInsertFinalizedDeposits(t *testing.T) {
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gs = gs.Copy()
gs, err = gs.Copy()
require.NoError(t, err)
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, gs.SetEth1DepositIndex(8))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
@@ -1033,11 +1066,13 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
gs, _ := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gs = gs.Copy()
gs, err = gs.Copy()
require.NoError(t, err)
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 7}))
assert.NoError(t, gs.SetEth1DepositIndex(6))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
gs2 := gs.Copy()
gs2, err := gs.Copy()
require.NoError(t, err)
assert.NoError(t, gs2.SetEth1Data(&ethpb.Eth1Data{DepositCount: 15}))
assert.NoError(t, gs2.SetEth1DepositIndex(13))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
@@ -2304,6 +2339,65 @@ func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
require.NoError(t, service.fillMissingBlockPayloadId(ctx, time.Unix(int64(params.BeaconConfig().SecondsPerSlot/2), 0)))
}
func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
fc := doublylinkedtree.New()
pool := blstoexec.NewPool()
opts := []Option{
WithDatabase(beaconDB),
WithStateGen(stategen.New(beaconDB, fc)),
WithForkChoiceStore(fc),
WithStateNotifier(&mock.MockStateNotifier{}),
WithBLSToExecPool(pool),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
t.Run("pre Capella block", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyBellatrix{}
pbb := &ethpb.BeaconBlockBellatrix{
Body: body,
}
blk, err := consensusblocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
})
t.Run("Post Capella no changes", func(t *testing.T) {
body := &ethpb.BeaconBlockBodyCapella{}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := consensusblocks.NewBeaconBlock(pbb)
require.NoError(t, err)
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
})
t.Run("Post Capella some changes", func(t *testing.T) {
idx := types.ValidatorIndex(123)
change := &ethpb.BLSToExecutionChange{
ValidatorIndex: idx,
}
signedChange := &ethpb.SignedBLSToExecutionChange{
Message: change,
}
body := &ethpb.BeaconBlockBodyCapella{
BlsToExecutionChanges: []*ethpb.SignedBLSToExecutionChange{signedChange},
}
pbb := &ethpb.BeaconBlockCapella{
Body: body,
}
blk, err := consensusblocks.NewBeaconBlock(pbb)
require.NoError(t, err)
pool.InsertBLSToExecChange(signedChange)
require.Equal(t, true, pool.ValidatorExists(idx))
require.NoError(t, service.handleBlockBLSToExecChanges(blk))
require.Equal(t, false, pool.ValidatorExists(idx))
})
}
// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot int64, delay int64) {

View File

@@ -109,7 +109,8 @@ func TestProcessAttestations_Ok(t *testing.T) {
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err := genesisState.Copy()
require.NoError(t, err)
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
@@ -212,7 +213,8 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
copied := genesisState.Copy()
copied, err := genesisState.Copy()
require.NoError(t, err)
// Generate a new block for attesters to attest
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
@@ -269,7 +271,8 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
genesisState, pks := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, genesisState))
copied := genesisState.Copy()
copied, err := genesisState.Copy()
require.NoError(t, err)
// Generate a new block
blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)

View File

@@ -33,7 +33,7 @@ func TestService_ReceiveBlock(t *testing.T) {
assert.NoError(t, err)
return blk
}
params.SetupTestConfigCleanupWithLock(t)
//params.SetupTestConfigCleanupWithLock(t)
bc := params.BeaconConfig().Copy()
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
params.OverrideBeaconConfig(bc)

View File

@@ -23,6 +23,7 @@ import (
f "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
@@ -70,6 +71,7 @@ type config struct {
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
@@ -429,7 +431,9 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
}
s.originBlockRoot = genesisBlkRoot
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)
if err := s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState); err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")

View File

@@ -83,7 +83,9 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
srv.Stop()
})
bState, _ := util.DeterministicGenesisState(t, 10)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProtoUnsafe())
pbUnsafe, err := bState.ToProtoUnsafe()
require.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(pbUnsafe)
require.NoError(t, err)
mockTrie, err := trie.NewTrie(0)
require.NoError(t, err)
@@ -325,7 +327,11 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
assert.DeepEqual(t, headBlock, pb, "Head block incorrect")
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
s1, err := headState.ToProtoUnsafe()
require.NoError(t, err)
s2, err := s.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, s1, s2, "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
@@ -380,7 +386,11 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
require.NoError(t, c.StartFromSavedState(headState))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepSSZEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Head state incorrect")
s1, err := headState.ToProtoUnsafe()
require.NoError(t, err)
s2, err := s.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, s1, s2, "Head state incorrect")
assert.Equal(t, genesisRoot, c.originBlockRoot, "Genesis block root incorrect")
pb, err := c.head.block.Proto()
require.NoError(t, err)

View File

@@ -33,9 +33,13 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
pbState1, err := state_native.ProtobufBeaconStatePhase0(s.ToProtoUnsafe())
pbs1, err := s.ToProtoUnsafe()
require.NoError(t, err)
pbstate, err := state_native.ProtobufBeaconStatePhase0(st.ToProtoUnsafe())
pbState1, err := state_native.ProtobufBeaconStatePhase0(pbs1)
require.NoError(t, err)
pbs2, err := st.ToProtoUnsafe()
require.NoError(t, err)
pbstate, err := state_native.ProtobufBeaconStatePhase0(pbs2)
require.NoError(t, err)
if !proto.Equal(pbState1, pbstate) {
t.Error("incorrectly cached state")
@@ -50,11 +54,19 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
s, err = cache.StateByCheckpoint(cp2)
require.NoError(t, err)
assert.DeepEqual(t, st2.ToProto(), s.ToProto(), "incorrectly cached state")
sProto, err := s.ToProto()
require.NoError(t, err)
st2Proto, err := st2.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, st2Proto, sProto, "incorrectly cached state")
s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.DeepEqual(t, st.ToProto(), s.ToProto(), "incorrectly cached state")
stProto, err := st.ToProto()
require.NoError(t, err)
sProto, err = s.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, stProto, sProto, "incorrectly cached state")
}
func TestCheckpointStateCache_MaxSize(t *testing.T) {

View File

@@ -94,7 +94,11 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
if exists && item != nil {
skipSlotCacheHit.Inc()
span.AddAttributes(trace.BoolAttribute("hit", true))
return item.(state.BeaconState).Copy(), nil
c, err := item.(state.BeaconState).Copy()
if err != nil {
return nil, err
}
return c, nil
}
skipSlotCacheMiss.Inc()
span.AddAttributes(trace.BoolAttribute("hit", false))
@@ -132,10 +136,15 @@ func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) {
}
// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
if c.disabled {
return
return nil
}
// Copy state so cached value is not mutated.
c.cache.Add(r, state.Copy())
cpy, err := state.Copy()
if err != nil {
return err
}
c.cache.Add(r, cpy)
return nil
}
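Put now propagates the error from the defensive state copy, so cache writes need an error check. A usage sketch matching the updated round-trip test below (the wrapper function and its name are illustrative):

func cacheRoundTrip(ctx context.Context, c *cache.SkipSlotCache, root [32]byte, st state.BeaconState) (state.BeaconState, error) {
    if err := c.Put(ctx, root, st); err != nil {
        return nil, err
    }
    // Get returns an independent copy, so mutating the result does not touch the cache.
    return c.Get(ctx, root)
}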

View File

@@ -28,10 +28,14 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
})
require.NoError(t, err)
c.Put(ctx, r, s)
require.NoError(t, c.Put(ctx, r, s))
c.MarkNotInProgress(r)
res, err := c.Get(ctx, r)
require.NoError(t, err)
assert.DeepEqual(t, res.ToProto(), s.ToProto(), "Expected equal protos to return from cache")
resProto, err := res.ToProto()
require.NoError(t, err)
sProto, err := s.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, resProto, sProto, "Expected equal protos to return from cache")
}

View File

@@ -16,6 +16,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl/testnet:__pkg__",
"//testing/endtoend/evaluators:__subpackages__",
"//testing/spectest:__subpackages__",
"//testing/util:__pkg__",
@@ -58,6 +59,7 @@ go_test(
"deposit_test.go",
"epoch_precompute_test.go",
"epoch_spec_test.go",
"exports_test.go",
"reward_test.go",
"sync_committee_test.go",
"transition_test.go",

View File

@@ -9,7 +9,6 @@ import (
p2pType "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -46,7 +45,7 @@ import (
// else:
// decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, error) {
votedKeys, votedIndices, didntVoteIndices, err := FilterSyncCommitteeVotes(s, sync)
s, votedKeys, err := processSyncAggregate(ctx, s, sync)
if err != nil {
return nil, errors.Wrap(err, "could not filter sync committee votes")
}
@@ -54,50 +53,70 @@ func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, errors.Wrap(err, "could not verify sync committee signature")
}
return ApplySyncRewardsPenalties(ctx, s, votedIndices, didntVoteIndices)
return s, nil
}
// FilterSyncCommitteeVotes filters the validator public keys and indices for the ones that voted and didn't vote.
func FilterSyncCommitteeVotes(s state.BeaconState, sync *ethpb.SyncAggregate) (
votedKeys []bls.PublicKey,
votedIndices []types.ValidatorIndex,
didntVoteIndices []types.ValidatorIndex,
err error) {
// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
// verifying the BLS signatures. It returns the modified beacon state and the list of validators'
// public keys that voted, for future signature verification.
func processSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (
state.BeaconState,
[]bls.PublicKey,
error) {
currentSyncCommittee, err := s.CurrentSyncCommittee()
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
if currentSyncCommittee == nil {
return nil, nil, nil, errors.New("nil current sync committee in state")
return nil, nil, errors.New("nil current sync committee in state")
}
committeeKeys := currentSyncCommittee.Pubkeys
if sync.SyncCommitteeBits.Len() > uint64(len(committeeKeys)) {
return nil, nil, nil, errors.New("bits length exceeds committee length")
return nil, nil, errors.New("bits length exceeds committee length")
}
votedKeys = make([]bls.PublicKey, 0, len(committeeKeys))
votedIndices = make([]types.ValidatorIndex, 0, len(committeeKeys))
didntVoteIndices = make([]types.ValidatorIndex, 0) // No allocation. Expect most votes.
votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, nil, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, nil, err
}
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, nil, err
}
earnedProposerReward := uint64(0)
for i := uint64(0); i < sync.SyncCommitteeBits.Len(); i++ {
vIdx, exists := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
// Impossible scenario.
if !exists {
return nil, nil, nil, errors.New("validator public key does not exist in state")
return nil, nil, errors.New("validator public key does not exist in state")
}
if sync.SyncCommitteeBits.BitAt(i) {
pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
if err != nil {
return nil, nil, nil, err
return nil, nil, err
}
votedKeys = append(votedKeys, pubKey)
votedIndices = append(votedIndices, vIdx)
if err := helpers.IncreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
}
earnedProposerReward += proposerReward
} else {
didntVoteIndices = append(didntVoteIndices, vIdx)
if err := helpers.DecreaseBalance(s, vIdx, participantReward); err != nil {
return nil, nil, err
}
}
}
return
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, nil, err
}
return s, votedKeys, err
}
// VerifySyncCommitteeSig verifies sync committee signature `syncSig` is valid with respect to public keys `syncKeys`.
@@ -126,43 +145,6 @@ func VerifySyncCommitteeSig(s state.BeaconState, syncKeys []bls.PublicKey, syncS
return nil
}
// ApplySyncRewardsPenalties applies rewards and penalties for proposer and sync committee participants.
func ApplySyncRewardsPenalties(ctx context.Context, s state.BeaconState, votedIndices, didntVoteIndices []types.ValidatorIndex) (state.BeaconState, error) {
activeBalance, err := helpers.TotalActiveBalance(s)
if err != nil {
return nil, err
}
proposerReward, participantReward, err := SyncRewards(activeBalance)
if err != nil {
return nil, err
}
// Apply sync committee rewards.
earnedProposerReward := uint64(0)
for _, index := range votedIndices {
if err := helpers.IncreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
earnedProposerReward += proposerReward
}
// Apply proposer rewards.
proposerIndex, err := helpers.BeaconProposerIndex(ctx, s)
if err != nil {
return nil, err
}
if err := helpers.IncreaseBalance(s, proposerIndex, earnedProposerReward); err != nil {
return nil, err
}
// Apply sync committee penalties.
for _, index := range didntVoteIndices {
if err := helpers.DecreaseBalance(s, index, participantReward); err != nil {
return nil, err
}
}
return s, nil
}
// SyncRewards returns the proposer reward and the sync participant reward given the total active balance in state.
func SyncRewards(activeBalance uint64) (proposerReward, participantReward uint64, err error) {
cfg := params.BeaconConfig()

View File

@@ -168,7 +168,36 @@ func TestProcessSyncCommittee_MixParticipation_GoodSignature(t *testing.T) {
require.NoError(t, err)
}
func TestProcessSyncCommittee_FilterSyncCommitteeVotes(t *testing.T) {
// This is a regression test for #11696
func TestProcessSyncCommittee_DontPrecompute(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
require.NoError(t, err)
committeeKeys := committee.Pubkeys
committeeKeys[1] = committeeKeys[0]
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
idx, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[0]))
require.Equal(t, true, ok)
syncBits := bitfield.NewBitvector512()
for i := range syncBits {
syncBits[i] = 0xFF
}
syncBits.SetBitAt(0, false)
syncAggregate := &ethpb.SyncAggregate{
SyncCommitteeBits: syncBits,
}
require.NoError(t, beaconState.UpdateBalancesAtIndex(idx, 0))
st, votedKeys, err := altair.ProcessSyncAggregateExported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
require.Equal(t, 511, len(votedKeys))
require.DeepEqual(t, committeeKeys[0], votedKeys[0].Marshal())
balances := st.Balances()
require.Equal(t, uint64(988), balances[idx])
}
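The 988 and 31999999012 gwei figures asserted in these tests follow from the Altair process_sync_aggregate reward arithmetic. A sketch of that arithmetic using Prysm config names (the config fields are assumed; baseRewardPerIncrement stands in for the spec's get_base_reward_per_increment):

func syncRewardsSketch(activeBalance, baseRewardPerIncrement uint64) (proposerReward, participantReward uint64) {
    cfg := params.BeaconConfig()
    totalActiveIncrements := activeBalance / cfg.EffectiveBalanceIncrement
    totalBaseRewards := baseRewardPerIncrement * totalActiveIncrements
    maxParticipantRewards := totalBaseRewards * cfg.SyncRewardWeight / cfg.WeightDenominator / uint64(cfg.SlotsPerEpoch)
    participantReward = maxParticipantRewards / cfg.SyncCommitteeSize // 988 in the tests above
    proposerReward = participantReward * cfg.ProposerWeight / (cfg.WeightDenominator - cfg.ProposerWeight)
    return
}

Voters gain participantReward, non-voters lose the same amount, and the proposer earns proposerReward for each participating committee member, which is what the balance assertions above and below check.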
func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, beaconState.SetSlot(1))
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
@@ -183,25 +212,40 @@ func TestProcessSyncCommittee_FilterSyncCommitteeVotes(t *testing.T) {
SyncCommitteeBits: syncBits,
}
votedKeys, votedIndices, didntVoteIndices, err := altair.FilterSyncCommitteeVotes(beaconState, syncAggregate)
st, votedKeys, err := altair.ProcessSyncAggregateExported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
votedMap := make(map[[fieldparams.BLSPubkeyLength]byte]bool)
for _, key := range votedKeys {
votedMap[bytesutil.ToBytes48(key.Marshal())] = true
}
require.Equal(t, int(syncBits.Len()/2), len(votedKeys))
require.Equal(t, int(syncBits.Len()/2), len(votedIndices))
require.Equal(t, int(syncBits.Len()/2), len(didntVoteIndices))
currentSyncCommittee, err := st.CurrentSyncCommittee()
require.NoError(t, err)
committeeKeys := currentSyncCommittee.Pubkeys
balances := st.Balances()
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, err)
for i := 0; i < len(syncBits); i++ {
if syncBits.BitAt(uint64(i)) {
pk := beaconState.PubkeyAtIndex(votedIndices[i])
pk := bytesutil.ToBytes48(committeeKeys[i])
require.DeepEqual(t, true, votedMap[pk])
idx, ok := st.ValidatorIndexByPubkey(pk)
require.Equal(t, true, ok)
require.Equal(t, uint64(32000000988), balances[idx])
} else {
pk := beaconState.PubkeyAtIndex(didntVoteIndices[i])
pk := bytesutil.ToBytes48(committeeKeys[i])
require.DeepEqual(t, false, votedMap[pk])
idx, ok := st.ValidatorIndexByPubkey(pk)
require.Equal(t, true, ok)
if idx != proposerIndex {
require.Equal(t, uint64(31999999012), balances[idx])
}
}
}
require.Equal(t, uint64(32000035108), balances[proposerIndex])
}
func Test_VerifySyncCommitteeSig(t *testing.T) {
@@ -240,22 +284,6 @@ func Test_VerifySyncCommitteeSig(t *testing.T) {
require.NoError(t, altair.VerifySyncCommitteeSig(beaconState, pks, aggregatedSig))
}
func Test_ApplySyncRewardsPenalties(t *testing.T) {
beaconState, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
beaconState, err := altair.ApplySyncRewardsPenalties(context.Background(), beaconState,
[]types.ValidatorIndex{0, 1}, // voted
[]types.ValidatorIndex{2, 3}) // didn't vote
require.NoError(t, err)
balances := beaconState.Balances()
require.Equal(t, uint64(32000000988), balances[0])
require.Equal(t, balances[0], balances[1])
require.Equal(t, uint64(31999999012), balances[2])
require.Equal(t, balances[2], balances[3])
proposerIndex, err := helpers.BeaconProposerIndex(context.Background(), beaconState)
require.NoError(t, err)
require.Equal(t, uint64(32000000282), balances[proposerIndex])
}
func Test_SyncRewards(t *testing.T) {
tests := []struct {
name string

View File

@@ -307,7 +307,8 @@ func TestProcessRewardsAndPenaltiesPrecompute_InactivityLeak(t *testing.T) {
require.NoError(t, err)
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
require.NoError(t, err)
sCopy := s.Copy()
sCopy, err := s.Copy()
require.NoError(t, err)
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
require.NoError(t, err)

View File

@@ -0,0 +1,3 @@
package altair
var ProcessSyncAggregateExported = processSyncAggregate
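exports_test.go is the usual Go trick for testing an unexported function from an external test package: because files ending in _test.go are compiled only during tests, the alias never ships in production builds. The black-box tests then call it through the package name, as in this sketch taken from the sync committee tests above:

// in sync_committee_test.go (package altair_test)
st, votedKeys, err := altair.ProcessSyncAggregateExported(context.Background(), beaconState, syncAggregate)
require.NoError(t, err)
_, _ = st, votedKeys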

View File

@@ -72,7 +72,8 @@ func TestTranslateParticipation(t *testing.T) {
func TestUpgradeToAltair(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState := st.Copy()
preForkState, err := st.Copy()
require.NoError(t, err)
aState, err := altair.UpgradeToAltair(context.Background(), st)
require.NoError(t, err)

View File

@@ -41,8 +41,8 @@ go_library(
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//math:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",
@@ -70,6 +70,7 @@ go_test(
"deposit_test.go",
"eth1_data_test.go",
"exit_test.go",
"exports_test.go",
"genesis_test.go",
"header_test.go",
"payload_test.go",
@@ -97,7 +98,8 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash/htr:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/engine/v1:go_default_library",

View File

@@ -0,0 +1,3 @@
package blocks
var ProcessBLSToExecutionChange = processBLSToExecutionChange

View File

@@ -41,7 +41,7 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
if err != nil {
return false, err
}
isEmpty, err := blocks.IsEmptyExecutionData(h)
isEmpty, err := blocks.IsEmptyExecutionDataHeader(h)
if err != nil {
return false, err
}
@@ -87,9 +87,6 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
if IsPreBellatrixVersion(st.Version()) {
return false, nil
}
if st.Version() > version.Bellatrix {
return true, nil
}
header, err := st.LatestExecutionPayloadHeader()
if err != nil {
return false, err
@@ -99,8 +96,8 @@ func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
func IsExecutionEnabledUsingHeader(header interfaces.ExecutionData, body interfaces.BeaconBlockBody) (bool, error) {
isEmpty, err := blocks.IsEmptyExecutionData(header)
func IsExecutionEnabledUsingHeader(header interfaces.ExecutionDataHeader, body interfaces.BeaconBlockBody) (bool, error) {
isEmpty, err := blocks.IsEmptyExecutionDataHeader(header)
if err != nil {
return false, err
}
@@ -123,7 +120,7 @@ func IsPreBellatrixVersion(v int) bool {
// # Verify consistency of the parent hash with respect to the previous execution payload header
// if is_merge_complete(state):
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error {
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionDataHeader) error {
complete, err := IsMergeTransitionComplete(st)
if err != nil {
return err
@@ -226,7 +223,7 @@ func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (sta
}
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionDataHeader) error {
// Skip validation if the state is not merge compatible.
complete, err := IsMergeTransitionComplete(st)
if err != nil {
@@ -247,7 +244,7 @@ func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interf
}
// ValidatePayloadHeader validates the payload header.
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionDataHeader) error {
// Validate header's random mix matches with state in current epoch
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
@@ -269,7 +266,7 @@ func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData
}
// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionDataHeader) (state.BeaconState, error) {
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
return nil, err
}
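These signature changes reflect a split between two interfaces: blocks carry a full interfaces.ExecutionData payload, while the beacon state stores only an interfaces.ExecutionDataHeader. A caller-side sketch of validating the header taken from the state (the surrounding wiring is assumed; the accessor names appear elsewhere in this diff):

header, err := st.LatestExecutionPayloadHeader() // interfaces.ExecutionDataHeader
if err != nil {
    return err
}
if err := blocks.ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
    return err
}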

View File

@@ -253,7 +253,7 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
require.NoError(t, err)
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, true, got)
require.Equal(t, false, got)
}
func Test_IsExecutionEnabled(t *testing.T) {
@@ -349,15 +349,6 @@ func Test_IsExecutionEnabled(t *testing.T) {
})
}
}
func Test_IsExecutionEnabledCapella(t *testing.T) {
st, _ := util.DeterministicGenesisStateCapella(t, 1)
blk := util.NewBeaconBlockCapella()
body, err := consensusblocks.NewBeaconBlockBody(blk.Block.Body)
require.NoError(t, err)
got, err := blocks.IsExecutionEnabled(st, body)
require.NoError(t, err)
require.Equal(t, true, got)
}
func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
tests := []struct {
@@ -746,7 +737,8 @@ func Test_ValidatePayloadHeader(t *testing.T) {
func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
emptySt := st.Copy()
emptySt, err := st.Copy()
require.NoError(t, err)
wrappedHeader, err := consensusblocks.WrappedExecutionPayloadHeader(&enginev1.ExecutionPayloadHeader{BlockHash: []byte{'a'}})
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(wrappedHeader))

View File

@@ -8,17 +8,43 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
)
const executionToBLSPadding = 12
// ProcessBLSToExecutionChanges validates the SignedBLSToExecutionChange messages in a block and applies each one to the state.
func ProcessBLSToExecutionChanges(
st state.BeaconState,
signed interfaces.SignedBeaconBlock) (state.BeaconState, error) {
if signed.Version() < version.Capella {
return st, nil
}
changes, err := signed.Block().Body().BLSToExecutionChanges()
if err != nil {
return nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
}
// Return early if no changes
if len(changes) == 0 {
return st, nil
}
for _, change := range changes {
st, err = processBLSToExecutionChange(st, change)
if err != nil {
return nil, errors.Wrap(err, "could not process BLSToExecutionChange")
}
}
return st, nil
}
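Usage sketch for the new block-level helper; the wrapper function is illustrative and its placement inside Capella block processing is an assumption, only the helper's signature comes from the code above:

func applyBLSChanges(st state.BeaconState, blk interfaces.SignedBeaconBlock) (state.BeaconState, error) {
    st, err := blocks.ProcessBLSToExecutionChanges(st, blk)
    if err != nil {
        return nil, errors.Wrap(err, "could not process BLS to execution changes")
    }
    return st, nil
}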
// processBLSToExecutionChange validates a SignedBLSToExecution message and
// changes the validator's withdrawal address accordingly.
//
// Spec pseudocode definition:
@@ -39,13 +65,30 @@ const executionToBLSPadding = 12
// + b'\x00' * 11
// + address_change.to_execution_address
// )
func ProcessBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
func processBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) {
// Checks that the message passes the validation conditions.
val, err := ValidateBLSToExecutionChange(st, signed)
if err != nil {
return nil, err
}
message := signed.Message
newCredentials := make([]byte, executionToBLSPadding)
newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
return st, err
}
// ValidateBLSToExecutionChange validates the execution change message against the state and returns the
// validator referenced by the message.
func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.SignedBLSToExecutionChange) (*ethpb.Validator, error) {
if signed == nil {
return st, errNilSignedWithdrawalMessage
return nil, errNilSignedWithdrawalMessage
}
message := signed.Message
if message == nil {
return st, errNilWithdrawalMessage
return nil, errNilWithdrawalMessage
}
val, err := st.ValidatorAtIndex(message.ValidatorIndex)
@@ -59,26 +102,12 @@ func ProcessBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSTo
// hash the public key and verify it matches the withdrawal credentials
fromPubkey := message.FromBlsPubkey
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(fromPubkey[:32]), bytesutil.ToBytes32(fromPubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
if !bytes.Equal(digest[0][1:], cred[1:]) {
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(fromPubkey)
if !bytes.Equal(digest[1:], cred[1:]) {
return nil, errInvalidWithdrawalCredentials
}
epoch := slots.ToEpoch(st.Slot())
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
if err := signing.VerifySigningRoot(message, fromPubkey, signed.Signature, domain); err != nil {
return nil, signing.ErrSigFailedToVerify
}
newCredentials := make([]byte, executionToBLSPadding)
newCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
val.WithdrawalCredentials = append(newCredentials, message.ToExecutionAddress...)
err = st.UpdateValidatorAtIndex(message.ValidatorIndex, val)
return st, err
return val, nil
}
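Splitting validation from state mutation lets callers that must not mutate state, such as gossip validation, reuse the same checks against a read-only state. A sketch of that use (the pubsub call site and function name are assumptions, not part of this diff):

func validateBLSChangeGossip(headState state.ReadOnlyBeaconState, msg *ethpb.SignedBLSToExecutionChange) (pubsub.ValidationResult, error) {
    if _, err := blocks.ValidateBLSToExecutionChange(headState, msg); err != nil {
        return pubsub.ValidationReject, err
    }
    return pubsub.ValidationAccept, nil
}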
func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal) (state.BeaconState, error) {
@@ -121,3 +150,37 @@ func ProcessWithdrawals(st state.BeaconState, withdrawals []*enginev1.Withdrawal
}
return st, nil
}
func BLSChangesSignatureBatch(
st state.ReadOnlyBeaconState,
changes []*ethpb.SignedBLSToExecutionChange,
) (*bls.SignatureBatch, error) {
// Return early if no changes
if len(changes) == 0 {
return bls.NewSet(), nil
}
batch := &bls.SignatureBatch{
Signatures: make([][]byte, len(changes)),
PublicKeys: make([]bls.PublicKey, len(changes)),
Messages: make([][32]byte, len(changes)),
}
epoch := slots.ToEpoch(st.Slot())
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
for i, change := range changes {
batch.Signatures[i] = change.Signature
publicKey, err := bls.PublicKeyFromBytes(change.Message.FromBlsPubkey)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
batch.PublicKeys[i] = publicKey
htr, err := signing.SigningData(change.Message.HashTreeRoot, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute BLSToExecutionChange signing data")
}
batch.Messages[i] = htr
}
return batch, nil
}
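Verification sketch mirroring TestBLSChangesSignatureBatch below: the signatures for all changes in a block are checked in one batched verification instead of one VerifySigningRoot call per change (the wrapper function is illustrative):

func verifyBLSChanges(st state.ReadOnlyBeaconState, changes []*ethpb.SignedBLSToExecutionChange) error {
    batch, err := blocks.BLSChangesSignatureBatch(st, changes)
    if err != nil {
        return err
    }
    ok, err := batch.Verify()
    if err != nil {
        return err
    }
    if !ok {
        return signing.ErrSigFailedToVerify
    }
    return nil
}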

View File

@@ -10,10 +10,12 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/crypto/bls/common"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/require"
@@ -32,14 +34,13 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
FromBlsPubkey: pubkey,
}
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[0][:],
WithdrawalCredentials: digest[:],
},
}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -68,6 +69,47 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
require.DeepEqual(t, message.ToExecutionAddress, val.WithdrawalCredentials[12:])
})
t.Run("happy case only validation", func(t *testing.T) {
priv, err := bls.RandKey()
require.NoError(t, err)
pubkey := priv.PublicKey().Marshal()
message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
ValidatorIndex: 0,
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[:],
},
}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: registry,
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
require.NoError(t, err)
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
require.NoError(t, err)
signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
val, err := blocks.ValidateBLSToExecutionChange(st, signed)
require.NoError(t, err)
require.DeepEqual(t, digest[:], val.WithdrawalCredentials)
})
t.Run("non-existent validator", func(t *testing.T) {
priv, err := bls.RandKey()
@@ -80,14 +122,13 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
FromBlsPubkey: pubkey,
}
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[0][:],
WithdrawalCredentials: digest[:],
},
}
st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -160,15 +201,13 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
ValidatorIndex: 0,
FromBlsPubkey: pubkey,
}
pubkeyChunks := [][32]byte{bytesutil.ToBytes32(pubkey[:32]), bytesutil.ToBytes32(pubkey[32:])}
digest := make([][32]byte, 1)
htr.VectorizedSha256(pubkeyChunks, digest)
digest[0][0] = params.BeaconConfig().BLSWithdrawalPrefixByte
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
registry := []*ethpb.Validator{
{
WithdrawalCredentials: digest[0][:],
WithdrawalCredentials: digest[:],
},
}
registry[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
@@ -508,7 +547,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
checkPostState := func(t *testing.T, expected control, st state.BeaconState) {
l, err := st.LastWithdrawalValidatorIndex()
l, err := st.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, expected.NextWithdrawalValidatorIndex, l)
@@ -578,3 +617,137 @@ func TestProcessWithdrawals(t *testing.T) {
})
}
}
func TestProcessBLSToExecutionChanges(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
numValidators := 10
validators := make([]*ethpb.Validator, numValidators)
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()
message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}
body := &ethpb.BeaconBlockBodyCapella{
BlsToExecutionChanges: signedChanges,
}
bpb := &ethpb.BeaconBlockCapella{
Body: body,
}
sbpb := &ethpb.SignedBeaconBlockCapella{
Block: bpb,
}
signed, err := consensusblocks.NewSignedBeaconBlock(sbpb)
require.NoError(t, err)
st, err = blocks.ProcessBLSToExecutionChanges(st, signed)
require.NoError(t, err)
vals := st.Validators()
for _, val := range vals {
require.DeepEqual(t, executionAddress, val.WithdrawalCredentials[12:])
require.Equal(t, params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, val.WithdrawalCredentials[0])
}
}
func TestBLSChangesSignatureBatch(t *testing.T) {
spb := &ethpb.BeaconStateCapella{
Fork: &ethpb.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
numValidators := 10
validators := make([]*ethpb.Validator, numValidators)
blsChanges := make([]*ethpb.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
for i := range validators {
v := &ethpb.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()
message := &ethpb.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedChanges := make([]*ethpb.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
signed := &ethpb.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}
batch, err := blocks.BLSChangesSignatureBatch(st, signedChanges)
require.NoError(t, err)
verify, err := batch.Verify()
require.NoError(t, err)
require.Equal(t, true, verify)
}
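Both tests above construct BLS withdrawal credentials the same way: SHA-256 the 48-byte BLS pubkey and overwrite byte 0 with the BLS withdrawal prefix, then assert after processing that the credentials carry the ETH1 prefix byte and the execution address in the last 20 bytes. A minimal sketch of the credential derivation, using the standard library's sha256 as a stand-in for Prysm's hash helpers:

package main

import (
	"crypto/sha256"
	"fmt"
)

// blsWithdrawalCredentials mirrors what the tests build: SHA-256 of the BLS
// pubkey with the first byte replaced by the BLS withdrawal prefix.
// 0x00 stands in for params.BeaconConfig().BLSWithdrawalPrefixByte.
func blsWithdrawalCredentials(pubkey []byte) [32]byte {
	digest := sha256.Sum256(pubkey)
	digest[0] = 0x00
	return digest
}

func main() {
	pubkey := make([]byte, 48) // placeholder BLS public key
	fmt.Printf("%x\n", blsWithdrawalCredentials(pubkey))
}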

View File

@@ -15,7 +15,8 @@ import (
func TestUpgradeToCapella(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState := st.Copy()
preForkState, err := st.Copy()
require.NoError(t, err)
mSt, err := capella.UpgradeToCapella(st)
require.NoError(t, err)
@@ -95,7 +96,7 @@ func TestUpgradeToCapella(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(0), nwi)
lwvi, err := mSt.LastWithdrawalValidatorIndex()
lwvi, err := mSt.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, types.ValidatorIndex(0), lwvi)
}

View File

@@ -84,7 +84,7 @@ func processJustificationBits(state state.BeaconState, totalActiveBalance, prevE
return newBits
}
// updateJustificationAndFinalization processes justification and finalization during
// weighJustificationAndFinalization processes justification and finalization during
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield.Bitvector4) (state.BeaconState, error) {
jc, fc, err := computeCheckpoints(state, newBits)

View File

@@ -6,6 +6,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/execution",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl/testnet:__pkg__",
"//testing/spectest:__subpackages__",
"//validator/client:__pkg__",
],

View File

@@ -14,7 +14,8 @@ import (
func TestUpgradeToBellatrix(t *testing.T) {
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
preForkState := st.Copy()
preForkState, err := st.Copy()
require.NoError(t, err)
mSt, err := execution.UpgradeToBellatrix(st)
require.NoError(t, err)

View File

@@ -177,6 +177,14 @@ func CommitteeAssignments(
if err != nil {
return nil, nil, err
}
minValidStartSlot := types.Slot(0)
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
}
if startSlot < minValidStartSlot {
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
}
proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
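The new guard rejects start slots that fall outside the window of block roots the state still holds. A quick worked example with illustrative numbers (the real bound is params.BeaconConfig().SlotsPerHistoricalRoot):

package main

import "fmt"

func main() {
	slotsPerHistoricalRoot := uint64(8192) // illustrative; read from config in practice
	stateSlot := uint64(10000)

	minValidStartSlot := uint64(0)
	if stateSlot >= slotsPerHistoricalRoot {
		minValidStartSlot = stateSlot - slotsPerHistoricalRoot // 10000 - 8192 = 1808
	}
	fmt.Println(minValidStartSlot) // requests with a start slot below this are rejected
}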

View File

@@ -235,6 +235,25 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
Slot: params.BeaconConfig().SlotsPerHistoricalRoot + 1,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = CommitteeAssignments(context.Background(), state, 0)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
// Initialize test with 256 validators, each slot and each index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)

View File

@@ -47,12 +47,12 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj f
// domain=domain,
// ))
func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) {
return signingData(object.HashTreeRoot, domain)
return SigningData(object.HashTreeRoot, domain)
}
// Computes the signing data by utilising the provided root function and then
// returning the signing data of the container object.
func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
func SigningData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
objRoot, err := rootFunc()
if err != nil {
return [32]byte{}, err
@@ -107,7 +107,7 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
root, err := signingData(blkHdr.HashTreeRoot, domain)
root, err := SigningData(blkHdr.HashTreeRoot, domain)
if err != nil {
return errors.Wrap(err, "could not compute signing root")
}
@@ -146,7 +146,7 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt
return nil, errors.Wrap(err, "could not convert bytes to public key")
}
// utilize custom block hashing function
root, err := signingData(rootFunc, domain)
root, err := SigningData(rootFunc, domain)
if err != nil {
return nil, errors.Wrap(err, "could not compute signing root")
}
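Exporting SigningData lets other packages derive a signing root from any root function plus a domain. A self-contained sketch of the same shape; note that the real helper hash-tree-roots an SSZ SigningData container, whereas this stand-in simply hashes root‖domain to stay short:

package main

import (
	"crypto/sha256"
	"fmt"
)

// signingData mimics the exported helper's signature: resolve the object root
// via the provided function, then derive a root over it and the domain.
func signingData(rootFunc func() ([32]byte, error), domain []byte) ([32]byte, error) {
	objRoot, err := rootFunc()
	if err != nil {
		return [32]byte{}, err
	}
	return sha256.Sum256(append(objRoot[:], domain...)), nil
}

func main() {
	root, err := signingData(func() ([32]byte, error) { return [32]byte{1}, nil }, make([]byte, 32))
	fmt.Println(root, err)
}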

View File

@@ -50,7 +50,9 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
c, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), c, beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -96,7 +98,8 @@ func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
c := beaconState.Copy()
c, err = beaconState.Copy()
require.NoError(t, err)
sig, err := util.BlockSignatureAltair(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
@@ -137,7 +140,9 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
c, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), c, beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -184,7 +189,8 @@ func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
c := beaconState.Copy()
c, err = beaconState.Copy()
require.NoError(t, err)
sig, err := util.BlockSignatureAltair(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
@@ -235,7 +241,8 @@ func createFullAltairBlockWithOperations(t *testing.T) (state.BeaconState,
sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
assert.NoError(t, err)
assert.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
tState := beaconState.Copy()
tState, err := beaconState.Copy()
assert.NoError(t, err)
blk, err := util.GenerateFullBlockAltair(tState, privKeys,
&util.BlockGenConfig{NumAttestations: 1, NumVoluntaryExits: 0, NumDeposits: 0}, 1)
require.NoError(t, err)

View File

@@ -52,7 +52,9 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -98,7 +100,8 @@ func TestExecuteBellatrixStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
c := beaconState.Copy()
c, err := beaconState.Copy()
require.NoError(t, err)
sig, err := util.BlockSignature(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()
@@ -139,7 +142,9 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -186,7 +191,8 @@ func TestExecuteBellatrixStateTransitionNoVerifySignature_CouldNotVerifyStateRoo
require.NoError(t, err)
block.Block.StateRoot = stateRoot[:]
c := beaconState.Copy()
c, err := beaconState.Copy()
require.NoError(t, err)
sig, err := util.BlockSignature(c, block.Block, privKeys)
require.NoError(t, err)
block.Signature = sig.Marshal()

View File

@@ -25,7 +25,8 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
defer undo()
beaconState, err := benchmark.PreGenState1Epoch()
require.NoError(b, err)
cleanStates := clonedStates(beaconState)
cleanStates, err := clonedStates(beaconState)
require.NoError(b, err)
block, err := benchmark.PreGenFullBlock()
require.NoError(b, err)
@@ -45,7 +46,8 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
beaconState, err := benchmark.PreGenState1Epoch()
require.NoError(b, err)
cleanStates := clonedStates(beaconState)
cleanStates, err := clonedStates(beaconState)
require.NoError(b, err)
block, err := benchmark.PreGenFullBlock()
require.NoError(b, err)
@@ -88,7 +90,9 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
for i := 0; i < b.N; i++ {
// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
// at run time.
_, err := coreState.ProcessEpochPrecompute(context.Background(), beaconState.Copy())
copied, err := beaconState.Copy()
require.NoError(b, err)
_, err = coreState.ProcessEpochPrecompute(context.Background(), copied)
require.NoError(b, err)
}
}
@@ -124,7 +128,9 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
func BenchmarkMarshalState_FullState(b *testing.B) {
beaconState, err := benchmark.PreGenstateFullEpochs()
require.NoError(b, err)
natState, err := state_native.ProtobufBeaconStatePhase0(beaconState.ToProtoUnsafe())
pb, err := beaconState.ToProtoUnsafe()
require.NoError(b, err)
natState, err := state_native.ProtobufBeaconStatePhase0(pb)
require.NoError(b, err)
b.Run("Proto_Marshal", func(b *testing.B) {
b.ResetTimer()
@@ -148,7 +154,9 @@ func BenchmarkMarshalState_FullState(b *testing.B) {
func BenchmarkUnmarshalState_FullState(b *testing.B) {
beaconState, err := benchmark.PreGenstateFullEpochs()
require.NoError(b, err)
natState, err := state_native.ProtobufBeaconStatePhase0(beaconState.ToProtoUnsafe())
pb, err := beaconState.ToProtoUnsafe()
require.NoError(b, err)
natState, err := state_native.ProtobufBeaconStatePhase0(pb)
require.NoError(b, err)
protoObject, err := proto.Marshal(natState)
require.NoError(b, err)
@@ -173,10 +181,14 @@ func BenchmarkUnmarshalState_FullState(b *testing.B) {
})
}
func clonedStates(beaconState state.BeaconState) []state.BeaconState {
func clonedStates(beaconState state.BeaconState) ([]state.BeaconState, error) {
clonedStates := make([]state.BeaconState, runAmount)
var err error
for i := 0; i < runAmount; i++ {
clonedStates[i] = beaconState.Copy()
clonedStates[i], err = beaconState.Copy()
if err != nil {
return nil, err
}
}
return clonedStates
return clonedStates, nil
}

View File

@@ -20,7 +20,9 @@ func TestSkipSlotCache_OK(t *testing.T) {
transition.SkipSlotCache.Enable()
defer transition.SkipSlotCache.Disable()
bState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProto())
bStateProto, err := bState.ToProto()
require.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(bStateProto)
require.NoError(t, err)
originalState, err := state_native.InitializeFromProtoPhase0(pbState)
require.NoError(t, err)
@@ -42,12 +44,18 @@ func TestSkipSlotCache_OK(t *testing.T) {
bState, err = transition.ExecuteStateTransition(context.Background(), bState, wsb)
require.NoError(t, err, "Could not process state transition")
assert.DeepEqual(t, originalState.ToProto(), bState.ToProto(), "Skipped slots cache leads to different states")
originalStateProto, err := originalState.ToProto()
require.NoError(t, err)
bStateProto, err = bState.ToProto()
require.NoError(t, err)
assert.DeepEqual(t, originalStateProto, bStateProto, "Skipped slots cache leads to different states")
}
func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
bState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
pbState, err := state_native.ProtobufBeaconStatePhase0(bState.ToProto())
bStateProto, err := bState.ToProto()
require.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(bStateProto)
require.NoError(t, err)
originalState, err := state_native.InitializeFromProtoPhase0(pbState)
require.NoError(t, err)
@@ -70,7 +78,9 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
// Create two shallow but different forks
var s1, s0 state.BeaconState
{
blk, err := util.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
c0, err := originalState.Copy()
require.NoError(t, err)
blk, err := util.GenerateFullBlock(c0, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 1")
signature, err := util.BlockSignature(originalState, blk.Block, privs)
@@ -78,12 +88,14 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
blk.Signature = signature.Marshal()
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s1, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
s1, err = transition.ExecuteStateTransition(context.Background(), c0, wsb)
require.NoError(t, err, "Could not run state transition")
}
{
blk, err := util.GenerateFullBlock(originalState.Copy(), privs, blkCfg, originalState.Slot()+10)
c1, err := originalState.Copy()
require.NoError(t, err)
blk, err := util.GenerateFullBlock(c1, privs, blkCfg, originalState.Slot()+10)
require.NoError(t, err)
copy(blk.Block.Body.Graffiti, "block 2")
signature, err := util.BlockSignature(originalState, blk.Block, privs)
@@ -91,7 +103,7 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
blk.Signature = signature.Marshal()
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s0, err = transition.ExecuteStateTransition(context.Background(), originalState.Copy(), wsb)
s0, err = transition.ExecuteStateTransition(context.Background(), c1, wsb)
require.NoError(t, err, "Could not run state transition")
}
@@ -116,28 +128,38 @@ func TestSkipSlotCache_ConcurrentMixup(t *testing.T) {
} else {
st = s0
}
setups = append(setups, st.Copy())
c, err := st.Copy()
require.NoError(t, err)
setups = append(setups, c)
}
problemSlot := s1.Slot() + 2
expected1, err := transition.ProcessSlots(context.Background(), s1.Copy(), problemSlot)
s1Copied, err := s1.Copy()
require.NoError(t, err)
expected1, err := transition.ProcessSlots(context.Background(), s1Copied, problemSlot)
require.NoError(t, err)
expectedRoot1, err := expected1.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 1 (even i) expected root %x at slot %d", expectedRoot1[:], problemSlot)
tmp1, err := transition.ProcessSlots(context.Background(), expected1.Copy(), problemSlot+1)
expectedS1Copied, err := expected1.Copy()
require.NoError(t, err)
tmp1, err := transition.ProcessSlots(context.Background(), expectedS1Copied, problemSlot+1)
require.NoError(t, err)
gotRoot := tmp1.StateRoots()[problemSlot]
require.DeepEqual(t, expectedRoot1[:], gotRoot, "State roots for chain 1 are bad, expected root doesn't match")
expected2, err := transition.ProcessSlots(context.Background(), s0.Copy(), problemSlot)
s0Copied, err := s0.Copy()
require.NoError(t, err)
expected2, err := transition.ProcessSlots(context.Background(), s0Copied, problemSlot)
require.NoError(t, err)
expectedRoot2, err := expected2.HashTreeRoot(context.Background())
require.NoError(t, err)
t.Logf("chain 2 (odd i) expected root %x at slot %d", expectedRoot2[:], problemSlot)
tmp2, err := transition.ProcessSlots(context.Background(), expected2.Copy(), problemSlot+1)
expectedS2Copied, err := expected2.Copy()
require.NoError(t, err)
tmp2, err := transition.ProcessSlots(context.Background(), expectedS2Copied, problemSlot+1)
require.NoError(t, err)
gotRoot = tmp2.StateRoots()[problemSlot]
require.DeepEqual(t, expectedRoot2[:], gotRoot, "State roots for chain 2 are bad, expected root doesn't match")

View File

@@ -98,9 +98,13 @@ func TestGenesisState_HashEquality(t *testing.T) {
state, err := transition.GenesisBeaconState(context.Background(), deposits, 0, &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
require.NoError(t, err)
pbState1, err := state_native.ProtobufBeaconStatePhase0(state1.ToProto())
state1Proto, err := state1.ToProto()
require.NoError(t, err)
pbstate, err := state_native.ProtobufBeaconStatePhase0(state.ToProto())
stateProto, err := state.ToProto()
require.NoError(t, err)
pbState1, err := state_native.ProtobufBeaconStatePhase0(state1Proto)
require.NoError(t, err)
pbstate, err := state_native.ProtobufBeaconStatePhase0(stateProto)
require.NoError(t, err)
root1, err1 := hash.HashProto(pbState1)

View File

@@ -41,7 +41,7 @@ func NextSlotState(_ context.Context, root []byte) (state.BeaconState, error) {
}
nextSlotCacheHit.Inc()
// Returning copied state.
return nsc.state.Copy(), nil
return nsc.state.Copy()
}
// UpdateNextSlotCache updates the `nextSlotCache`. It saves the input state after advancing the state slot by 1
@@ -49,8 +49,12 @@ func NextSlotState(_ context.Context, root []byte) (state.BeaconState, error) {
// This is useful to call after successfully processing a block.
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.BeaconState) error {
// Advancing one slot by using a copied state.
copied := state.Copy()
copied, err := ProcessSlots(ctx, copied, copied.Slot()+1)
copied, err := state.Copy()
if err != nil {
return err
}
copied, err = ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}

View File

@@ -239,7 +239,7 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
tracing.AnnotateError(span, ctx.Err())
// Cache last best value.
if highestSlot < state.Slot() {
if SkipSlotCache.Put(ctx, key, state); err != nil {
if err := SkipSlotCache.Put(ctx, key, state); err != nil {
log.WithError(err).Error("Failed to put skip slot cache value")
}
}
@@ -299,7 +299,10 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot)
}
if highestSlot < state.Slot() {
SkipSlotCache.Put(ctx, key, state)
err := SkipSlotCache.Put(ctx, key, state)
if err != nil {
return nil, err
}
}
return state, nil
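The first hunk in this file fixes a subtle Go bug: `if SkipSlotCache.Put(ctx, key, state); err != nil` discards Put's return value and tests a stale err from earlier in the function. A small self-contained illustration of the difference the short variable declaration makes (the cache type is a stub):

package main

import (
	"errors"
	"fmt"
)

type stubCache struct{}

func (stubCache) Put(key string) error { return errors.New("put failed") }

func main() {
	var c stubCache
	var err error // imagine a stale nil error left over from earlier calls

	if c.Put("k"); err != nil { // bug: Put's error is discarded, the stale err is checked
		fmt.Println("never reached")
	}

	if err := c.Put("k"); err != nil { // fix: a fresh err scoped to the if statement
		fmt.Println("handled:", err)
	}
}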

View File

@@ -127,10 +127,12 @@ func CalculateStateRoot(
}
// Copy state to avoid mutating the state reference.
state = state.Copy()
state, err := state.Copy()
if err != nil {
return [32]byte{}, err
}
// Execute per slots transition.
var err error
parentRoot := signed.Block().ParentRoot()
state, err = ProcessSlotsUsingNextSlotCache(ctx, state, parentRoot[:], signed.Block().Slot())
if err != nil {
@@ -200,6 +202,17 @@ func ProcessBlockNoVerifyAnySig(
set := bls.NewSet()
set.Join(bSet).Join(rSet).Join(aSet)
if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
}
cSet, err := b.BLSChangesSignatureBatch(st, changes)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
}
set.Join(cSet)
}
return set, st, nil
}
@@ -368,7 +381,11 @@ func altairOperations(
if _, err := altair.ProcessDeposits(ctx, st, signedBeaconBlock.Block().Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process altair deposit")
}
return b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
st, err = b.ProcessVoluntaryExits(ctx, st, signedBeaconBlock.Block().Body().VoluntaryExits())
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
return b.ProcessBLSToExecutionChanges(st, signedBeaconBlock)
}
// This calls phase 0 block operations.

View File

@@ -39,7 +39,9 @@ func TestExecuteStateTransitionNoVerify_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -95,7 +97,9 @@ func TestExecuteStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *test
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)

View File

@@ -76,7 +76,9 @@ func TestExecuteStateTransition_FullProcess(t *testing.T) {
require.NoError(t, err)
require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))
nextSlotState, err := transition.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
copied, err := beaconState.Copy()
require.NoError(t, err)
nextSlotState, err := transition.ProcessSlots(context.Background(), copied, beaconState.Slot()+1)
require.NoError(t, err)
parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
@@ -339,7 +341,8 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
require.NoError(t, beaconState.SetLatestBlockHeader(header))
parentRoot, err := beaconState.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
copied := beaconState.Copy()
copied, err := beaconState.Copy()
require.NoError(t, err)
require.NoError(t, copied.SetSlot(beaconState.Slot()+1))
randaoReveal, err := util.RandaoReveal(copied, currentEpoch, privKeys)
require.NoError(t, err)

View File

@@ -328,8 +328,8 @@ func (s *Store) SaveBlocks(ctx context.Context, blks []interfaces.SignedBeaconBl
func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
hasStateSummary := s.HasStateSummary(ctx, blockRoot)
return s.db.Update(func(tx *bolt.Tx) error {
hasStateSummary := s.hasStateSummaryBytes(tx, blockRoot)
hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil
if !(hasStateInDB || hasStateSummary) {
return errors.New("no state or state summary found with head block root")

View File

@@ -59,9 +59,9 @@ func (s *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for justified root: %#x", bytesutil.Trunc(checkpoint.Root))
@@ -82,9 +82,9 @@ func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for finalized root: %#x", bytesutil.Trunc(checkpoint.Root))

View File

@@ -92,7 +92,9 @@ func Test_migrateStateValidators(t *testing.T) {
assert.NoError(t, hashErr)
individualHashes = append(individualHashes, hash[:])
}
pbState, err := state_native.ProtobufBeaconStatePhase0(st.ToProtoUnsafe())
s, err := st.ToProtoUnsafe()
assert.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(s)
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
@@ -138,7 +140,11 @@ func Test_migrateStateValidators(t *testing.T) {
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
s1, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
s2, err := state.ToProtoUnsafe()
assert.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
// find hashes of the validators that are set as part of the state
var hashes []byte
@@ -151,7 +157,9 @@ func Test_migrateStateValidators(t *testing.T) {
}
// check if all the validators that were in the state, are stored properly in the validator bucket
pbState, err := state_native.ProtobufBeaconStatePhase0(rcvdState.ToProtoUnsafe())
s3, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStatePhase0(s3)
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {
@@ -241,7 +249,11 @@ func Test_migrateAltairStateValidators(t *testing.T) {
blockRoot := [32]byte{'A'}
rcvdState, err := dbStore.State(context.Background(), blockRoot)
assert.NoError(t, err)
require.DeepSSZEqual(t, rcvdState.ToProtoUnsafe(), state.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
s1, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
s2, err := state.ToProtoUnsafe()
assert.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
// find hashes of the validators that are set as part of the state
var hashes []byte
@@ -254,7 +266,9 @@ func Test_migrateAltairStateValidators(t *testing.T) {
}
// check if all the validators that were in the state, are stored properly in the validator bucket
pbState, err := state_native.ProtobufBeaconStateAltair(rcvdState.ToProtoUnsafe())
s3, err := rcvdState.ToProtoUnsafe()
assert.NoError(t, err)
pbState, err := state_native.ProtobufBeaconStateAltair(s3)
assert.NoError(t, err)
validatorsFoundCount := 0
for _, val := range pbState.Validators {

View File

@@ -189,7 +189,11 @@ func getValidators(states []state.ReadOnlyBeaconState) ([][]byte, map[string]*et
validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
for i, st := range states {
pb, ok := st.ToProtoUnsafe().(withValidators)
p, err := st.ToProtoUnsafe()
if err != nil {
return nil, nil, err
}
pb, ok := p.(withValidators)
if !ok {
return nil, nil, errors.New("could not cast state to interface with GetValidators()")
}
@@ -228,7 +232,11 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
// validator entries.To bring the gap closer, we empty the validators
// just before Put() and repopulate that state with original validators.
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
switch rawType := states[i].ToProtoUnsafe().(type) {
p, err := states[i].ToProtoUnsafe()
if err != nil {
return err
}
switch rawType := p.(type) {
case *ethpb.BeaconState:
pbState, err := statenative.ProtobufBeaconStatePhase0(rawType)
if err != nil {
@@ -534,15 +542,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
// marshal versioned state from struct type down to bytes.
func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, error) {
switch st.ToProtoUnsafe().(type) {
p, err := st.ToProtoUnsafe()
if err != nil {
return nil, err
}
switch p.(type) {
case *ethpb.BeaconState:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconState)
rState, ok := p.(*ethpb.BeaconState)
if !ok {
return nil, errors.New("non valid inner state")
}
return encode(ctx, rState)
case *ethpb.BeaconStateAltair:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateAltair)
rState, ok := p.(*ethpb.BeaconStateAltair)
if !ok {
return nil, errors.New("non valid inner state")
}
@@ -555,7 +567,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
}
return snappy.Encode(nil, append(altairKey, rawObj...)), nil
case *ethpb.BeaconStateBellatrix:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateBellatrix)
rState, ok := p.(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errors.New("non valid inner state")
}
@@ -568,7 +580,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
}
return snappy.Encode(nil, append(bellatrixKey, rawObj...)), nil
case *ethpb.BeaconStateCapella:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateCapella)
rState, ok := p.(*ethpb.BeaconStateCapella)
if !ok {
return nil, errors.New("non valid inner state")
}

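Since ToProtoUnsafe can now fail, marshalState resolves it once up front and type-switches on the result instead of re-calling it inside every case. A tiny stand-in sketch of that shape (the proto state types are replaced by placeholders):

package main

import (
	"errors"
	"fmt"
)

type phase0State struct{}
type altairState struct{}

// marshal converts once, then reuses the typed value in each branch.
func marshal(p interface{}) ([]byte, error) {
	switch s := p.(type) {
	case *phase0State:
		return []byte(fmt.Sprintf("phase0 %T", s)), nil
	case *altairState:
		return []byte(fmt.Sprintf("altair %T", s)), nil
	default:
		return nil, errors.New("non valid inner state")
	}
}

func main() {
	out, err := marshal(&altairState{})
	fmt.Println(string(out), err)
}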
View File

@@ -67,9 +67,14 @@ func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
ctx, span := trace.StartSpan(ctx, "BeaconDB.HasStateSummary")
defer span.End()
if s.stateSummaryCache.has(blockRoot) {
return true
}
var hasSummary bool
if err := s.db.View(func(tx *bolt.Tx) error {
hasSummary = s.hasStateSummaryBytes(tx, blockRoot)
enc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
hasSummary = len(enc) > 0
return nil
}); err != nil {
return false
@@ -77,14 +82,6 @@ func (s *Store) HasStateSummary(ctx context.Context, blockRoot [32]byte) bool {
return hasSummary
}
func (s *Store) hasStateSummaryBytes(tx *bolt.Tx, blockRoot [32]byte) bool {
if s.stateSummaryCache.has(blockRoot) {
return true
}
enc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
return len(enc) > 0
}
// This saves all cached state summary objects to DB, and clears up the cache.
func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
summaries := s.stateSummaryCache.getAll()

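HasStateSummary now checks the in-memory cache before opening a bolt view transaction and inlines the bucket lookup, while the call sites earlier in this diff (SaveHeadBlockRoot and the checkpoint savers) compute the flag before entering db.Update, presumably so the cache lookup and its locking stay out of the write transaction. A self-contained stand-in for the cache-first shape:

package main

import "fmt"

// summaryStore stands in for the real Store: an in-memory cache plus a map
// playing the role of the bolt stateSummaryBucket.
type summaryStore struct {
	cache map[[32]byte]bool
	db    map[[32]byte][]byte
}

// hasStateSummary hits the cache first, so the common case never touches the
// database; only on a miss does it fall back to the bucket lookup.
func (s *summaryStore) hasStateSummary(root [32]byte) bool {
	if s.cache[root] {
		return true
	}
	return len(s.db[root]) > 0
}

func main() {
	root := [32]byte{'A'}
	s := &summaryStore{cache: map[[32]byte]bool{root: true}, db: map[[32]byte][]byte{}}
	fmt.Println(s.hasStateSummary(root)) // true via the cache, no DB access
}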
View File

@@ -44,7 +44,11 @@ func TestState_CanSaveRetrieve(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state and retrieved state are not matching")
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state and retrieved state are not matching")
savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
@@ -77,7 +81,11 @@ func TestState_CanSaveRetrieveValidatorEntries(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -129,7 +137,11 @@ func TestStateAltair_CanSaveRetrieveValidatorEntries(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -239,7 +251,11 @@ func TestState_CanSaveRetrieveValidatorEntriesWithoutCache(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s1, s2, "saved state with validators and retrieved state are not matching")
// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -360,7 +376,11 @@ func TestGenesisState_CanSaveRetrieve(t *testing.T) {
savedGenesisS, err := db.GenesisState(context.Background())
require.NoError(t, err)
assert.DeepSSZEqual(t, st.ToProtoUnsafe(), savedGenesisS.ToProtoUnsafe(), "Did not retrieve saved state")
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
s2, err := savedGenesisS.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, s1, s2, "Did not retrieve saved state")
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), [32]byte{'C'}))
}
@@ -481,7 +501,8 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
s0 := st.ToProtoUnsafe()
s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
require.NoError(t, db.SaveState(context.Background(), st, r))
b.Block.Slot = 100
@@ -493,7 +514,8 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
st, err = util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
s1 := st.ToProtoUnsafe()
s1, err := st.ToProtoUnsafe()
require.NoError(t, err)
require.NoError(t, db.SaveState(context.Background(), st, r1))
b.Block.Slot = 1000
@@ -505,21 +527,27 @@ func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
st, err = util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(1000))
s2 := st.ToProtoUnsafe()
s2, err := st.ToProtoUnsafe()
require.NoError(t, err)
require.NoError(t, db.SaveState(context.Background(), st, r2))
highest, err := db.HighestSlotStatesBelow(context.Background(), 2)
require.NoError(t, err)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s0)
want, err := highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, s0)
highest, err = db.HighestSlotStatesBelow(context.Background(), 101)
require.NoError(t, err)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s1)
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, s1)
highest, err = db.HighestSlotStatesBelow(context.Background(), 1001)
require.NoError(t, err)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), s2)
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, s2)
}
func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
@@ -546,14 +574,24 @@ func TestStore_GenesisState_CanGetHighestBelow(t *testing.T) {
highest, err := db.HighestSlotStatesBelow(context.Background(), 2)
require.NoError(t, err)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), st.ToProtoUnsafe())
want, err := highest[0].ToProtoUnsafe()
require.NoError(t, err)
gotSt, err := st.ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, gotSt)
highest, err = db.HighestSlotStatesBelow(context.Background(), 1)
require.NoError(t, err)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), genesisState.ToProtoUnsafe())
gs, err := genesisState.ToProtoUnsafe()
require.NoError(t, err)
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, gs)
highest, err = db.HighestSlotStatesBelow(context.Background(), 0)
require.NoError(t, err)
assert.DeepSSZEqual(t, highest[0].ToProtoUnsafe(), genesisState.ToProtoUnsafe())
want, err = highest[0].ToProtoUnsafe()
require.NoError(t, err)
assert.DeepSSZEqual(t, want, gs)
}
func TestStore_CleanUpDirtyStates_AboveThreshold(t *testing.T) {
@@ -680,7 +718,11 @@ func TestAltairState_CanSaveRetrieve(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())
s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
s1, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s0, s1)
savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)
@@ -830,8 +872,11 @@ func TestStateBellatrix_CanSaveRetrieveValidatorEntries(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe(), "saved state with validators and retrieved state are not matching")
s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
s1, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s0, s1, "saved state with validators and retrieved state are not matching")
// check if the index of the second state is still present.
err = db.db.Update(func(tx *bolt.Tx) error {
@@ -873,8 +918,11 @@ func TestBellatrixState_CanSaveRetrieve(t *testing.T) {
savedS, err := db.State(context.Background(), r)
require.NoError(t, err)
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())
s0, err := st.ToProtoUnsafe()
require.NoError(t, err)
s1, err := savedS.ToProtoUnsafe()
require.NoError(t, err)
require.DeepSSZEqual(t, s0, s1)
savedS, err = db.State(context.Background(), [32]byte{'B'})
require.NoError(t, err)

View File

@@ -38,9 +38,9 @@ func (s *Store) SaveLastValidatedCheckpoint(ctx context.Context, checkpoint *eth
if err != nil {
return err
}
hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
hasStateSummary := s.hasStateSummaryBytes(tx, bytesutil.ToBytes32(checkpoint.Root))
hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
if !(hasStateInDB || hasStateSummary) {
log.Warnf("Recovering state summary for last validated root: %#x", bytesutil.Trunc(checkpoint.Root))

View File

@@ -51,6 +51,26 @@ func TestStore_LastValidatedCheckpoint_Recover(t *testing.T) {
assert.Equal(t, true, proto.Equal(cp, retrieved), "Wanted %v, received %v", cp, retrieved)
}
func BenchmarkStore_SaveLastValidatedCheckpoint(b *testing.B) {
db := setupDB(b)
ctx := context.Background()
root := bytesutil.ToBytes32([]byte{'A'})
cp := &ethpb.Checkpoint{
Epoch: 10,
Root: root[:],
}
st, err := util.NewBeaconState()
require.NoError(b, err)
require.NoError(b, st.SetSlot(1))
require.NoError(b, db.SaveState(ctx, st, root))
db.stateSummaryCache.clear()
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.NoError(b, db.SaveLastValidatedCheckpoint(ctx, cp))
}
}
func TestStore_LastValidatedCheckpoint_DefaultIsFinalized(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

View File

@@ -41,6 +41,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/hash:go_default_library",
@@ -52,6 +53,7 @@ go_library(
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//:go_default_library",
@@ -108,6 +110,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",

View File

@@ -19,8 +19,10 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -30,6 +32,8 @@ const (
NewPayloadMethod = "engine_newPayloadV1"
// ForkchoiceUpdatedMethod v1 request string for JSON-RPC.
ForkchoiceUpdatedMethod = "engine_forkchoiceUpdatedV1"
// ForkchoiceUpdatedMethodV2 v2 request string for JSON-RPC.
ForkchoiceUpdatedMethodV2 = "engine_forkchoiceUpdatedV2"
// GetPayloadMethod v1 request string for JSON-RPC.
GetPayloadMethod = "engine_getPayloadV1"
// ExchangeTransitionConfigurationMethod v1 request string for JSON-RPC.
@@ -66,7 +70,7 @@ type ExecutionPayloadReconstructor interface {
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
GetPayload(ctx context.Context, payloadId [8]byte) (*pb.ExecutionPayload, error)
ExchangeTransitionConfiguration(
@@ -114,7 +118,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
// ForkchoiceUpdated calls the engine_forkchoiceUpdatedV1 method via JSON-RPC.
func (s *Service) ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs *pb.PayloadAttributes,
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ForkchoiceUpdated")
defer span.End()
@@ -127,9 +131,31 @@ func (s *Service) ForkchoiceUpdated(
ctx, cancel := context.WithDeadline(ctx, d)
defer cancel()
result := &ForkchoiceUpdatedResponse{}
err := s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, attrs)
if err != nil {
return nil, nil, handleRPCError(err)
if attrs == nil {
return nil, nil, errors.New("nil payload attributer")
}
switch attrs.Version() {
case version.Bellatrix:
a, err := attrs.PbV1()
if err != nil {
return nil, nil, err
}
err = s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethod, state, a)
if err != nil {
return nil, nil, handleRPCError(err)
}
case version.Capella:
a, err := attrs.PbV2()
if err != nil {
return nil, nil, err
}
err = s.rpcClient.CallContext(ctx, result, ForkchoiceUpdatedMethodV2, state, a)
if err != nil {
return nil, nil, handleRPCError(err)
}
default:
return nil, nil, fmt.Errorf("unknown payload attribute version: %v", attrs.Version())
}
if result.Status == nil {

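ForkchoiceUpdated now dispatches on the payload attribute version: Bellatrix attributes go to engine_forkchoiceUpdatedV1, while Capella attributes (which add withdrawals) go to engine_forkchoiceUpdatedV2. A self-contained sketch of just the dispatch, with stand-ins for the version constants and method names:

package main

import "fmt"

const (
	bellatrix = iota
	capella
)

const (
	forkchoiceUpdatedMethod   = "engine_forkchoiceUpdatedV1"
	forkchoiceUpdatedMethodV2 = "engine_forkchoiceUpdatedV2"
)

// methodForVersion mirrors the switch added to ForkchoiceUpdated: pick the
// engine API endpoint from the payload-attribute version.
func methodForVersion(v int) (string, error) {
	switch v {
	case bellatrix:
		return forkchoiceUpdatedMethod, nil
	case capella:
		return forkchoiceUpdatedMethodV2, nil
	default:
		return "", fmt.Errorf("unknown payload attribute version: %v", v)
	}
}

func main() {
	m, err := methodForVersion(capella)
	fmt.Println(m, err)
}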
View File

@@ -24,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
@@ -72,7 +73,19 @@ func TestClient_IPC(t *testing.T) {
t.Run(ForkchoiceUpdatedMethod, func(t *testing.T) {
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, &pb.PayloadAttributes{})
p, err := payloadattribute.New(&pb.PayloadAttributes{})
require.NoError(t, err)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, p)
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
})
t.Run(ForkchoiceUpdatedMethodV2, func(t *testing.T) {
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
p, err := payloadattribute.New(&pb.PayloadAttributesV2{})
require.NoError(t, err)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, &pb.ForkchoiceState{}, p)
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
@@ -168,12 +181,38 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
})
t.Run(ForkchoiceUpdatedMethodV2+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributesV2{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
Withdrawals: []*pb.Withdrawal{{ValidatorIndex: 1, Amount: 1}},
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := forkchoiceUpdateSetupV2(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.NoError(t, err)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
require.DeepEqual(t, want.PayloadId, payloadID)
@@ -189,12 +228,38 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedSyncingResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.ErrorIs(t, err, ErrAcceptedSyncingPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
})
t.Run(ForkchoiceUpdatedMethodV2+" SYNCING status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
SafeBlockHash: []byte("safe"),
FinalizedBlockHash: []byte("finalized"),
}
payloadAttributes := &pb.PayloadAttributesV2{
Timestamp: 1,
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
Withdrawals: []*pb.Withdrawal{{ValidatorIndex: 1, Amount: 1}},
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedSyncingResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
srv := forkchoiceUpdateSetupV2(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := srv.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.ErrorIs(t, err, ErrAcceptedSyncingPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
@@ -210,12 +275,14 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedInvalidResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.ErrorIs(t, err, ErrInvalidPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, want.Status.LatestValidHash, validHash)
@@ -231,12 +298,14 @@ func TestClient_HTTP(t *testing.T) {
PrevRandao: []byte("random"),
SuggestedFeeRecipient: []byte("suggestedFeeRecipient"),
}
p, err := payloadattribute.New(payloadAttributes)
require.NoError(t, err)
want, ok := fix["ForkchoiceUpdatedAcceptedResponse"].(*ForkchoiceUpdatedResponse)
require.Equal(t, true, ok)
client := forkchoiceUpdateSetup(t, forkChoiceState, payloadAttributes, want)
// We call the RPC method via HTTP and expect a proper result.
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, payloadAttributes)
payloadID, validHash, err := client.ForkchoiceUpdated(ctx, forkChoiceState, p)
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
require.DeepEqual(t, (*pb.PayloadIDBytes)(nil), payloadID)
require.DeepEqual(t, []byte(nil), validHash)
@@ -1362,6 +1431,18 @@ func (*testEngineService) ForkchoiceUpdatedV1(
return item
}
func (*testEngineService) ForkchoiceUpdatedV2(
_ context.Context, _ *pb.ForkchoiceState, _ *pb.PayloadAttributes,
) *ForkchoiceUpdatedResponse {
fix := fixtures()
item, ok := fix["ForkchoiceUpdatedResponse"].(*ForkchoiceUpdatedResponse)
if !ok {
panic("not found")
}
item.Status.Status = pb.PayloadStatus_VALID
return item
}
func (*testEngineService) NewPayloadV1(
_ context.Context, _ *pb.ExecutionPayload,
) *pb.PayloadStatus {
@@ -1412,6 +1493,45 @@ func forkchoiceUpdateSetup(t *testing.T, fcs *pb.ForkchoiceState, att *pb.Payloa
return service
}
func forkchoiceUpdateSetupV2(t *testing.T, fcs *pb.ForkchoiceState, att *pb.PayloadAttributesV2, res *ForkchoiceUpdatedResponse) *Service {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
forkChoiceStateReq, err := json.Marshal(fcs)
require.NoError(t, err)
payloadAttrsReq, err := json.Marshal(att)
require.NoError(t, err)
// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(forkChoiceStateReq),
))
require.Equal(t, true, strings.Contains(
jsonRequestString, string(payloadAttrsReq),
))
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": res,
}
err = json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
}))
rpcClient, err := rpc.DialHTTP(srv.URL)
require.NoError(t, err)
service := &Service{}
service.rpcClient = rpcClient
return service
}
func newPayloadSetup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayload) *Service {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")

View File

@@ -551,7 +551,11 @@ func (s *Service) processChainStartIfReady(ctx context.Context, blockHash [32]by
// savePowchainData saves all powchain related metadata to disk.
func (s *Service) savePowchainData(ctx context.Context) error {
pbState, err := statenative.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
st, err := s.preGenesisState.ToProtoUnsafe()
if err != nil {
return err
}
pbState, err := statenative.ProtobufBeaconStatePhase0(st)
if err != nil {
return err
}

View File

@@ -777,7 +777,11 @@ func (s *Service) ensureValidPowchainData(ctx context.Context) error {
return errors.Wrap(err, "unable to retrieve eth1 data")
}
if eth1Data == nil || !eth1Data.ChainstartData.Chainstarted || !validateDepositContainers(eth1Data.DepositContainers) {
pbState, err := native.ProtobufBeaconStatePhase0(s.preGenesisState.ToProtoUnsafe())
st, err := s.preGenesisState.ToProtoUnsafe()
if err != nil {
return err
}
pbState, err := native.ProtobufBeaconStatePhase0(st)
if err != nil {
return err
}

View File

@@ -20,6 +20,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v3/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pb "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
)
@@ -43,7 +44,7 @@ func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData)
// ForkchoiceUpdated --
func (e *EngineClient) ForkchoiceUpdated(
_ context.Context, fcs *pb.ForkchoiceState, _ *pb.PayloadAttributes,
_ context.Context, fcs *pb.ForkchoiceState, _ payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error) {
if e.OverrideValidHash != [32]byte{} && bytesutil.ToBytes32(fcs.HeadBlockHash) == e.OverrideValidHash {
return e.PayloadIDBytes, e.ForkChoiceUpdatedResp, nil

View File

@@ -616,8 +616,8 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
return node.payloadHash
}
// ForkChoiceDump returns a full dump of forkhoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse, error) {
// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, error) {
jc := &v1.Checkpoint{
Epoch: f.store.justifiedCheckpoint.Epoch,
Root: f.store.justifiedCheckpoint.Root[:],
@@ -650,7 +650,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse
if f.store.headNode != nil {
headRoot = f.store.headNode.root
}
resp := &v1.ForkChoiceResponse{
resp := &v1.ForkChoiceDump{
JustifiedCheckpoint: jc,
BestJustifiedCheckpoint: bjc,
UnrealizedJustifiedCheckpoint: ujc,
@@ -659,7 +659,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse
ProposerBoostRoot: f.store.proposerBoostRoot[:],
PreviousProposerBoostRoot: f.store.previousProposerBoostRoot[:],
HeadRoot: headRoot[:],
ForkchoiceNodes: nodes,
ForkChoiceNodes: nodes,
}
return resp, nil


@@ -18,7 +18,7 @@ var (
nodeCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "doublylinkedtree_node_count",
Help: "The number of nodes in the DAG array based store structure.",
Help: "The number of nodes in the doubly linked tree based store structure.",
},
)
headChangesCount = promauto.NewCounter(


@@ -138,7 +138,7 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
}
thisNode := &v1.ForkChoiceNode{
Slot: n.slot,
Root: n.root[:],
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
@@ -147,9 +147,14 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: n.optimistic,
ExecutionPayload: n.payloadHash[:],
ExecutionBlockHash: n.payloadHash[:],
Timestamp: n.timestamp,
}
if n.optimistic {
thisNode.Validity = v1.ForkChoiceNodeValidity_OPTIMISTIC
} else {
thisNode.Validity = v1.ForkChoiceNodeValidity_VALID
}
nodes = append(nodes, thisNode)
var err error
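With the dump type renamed to v1.ForkChoiceDump and the node fields renamed (BlockRoot, ExecutionBlockHash, Validity), a consumer of the dump looks roughly like the sketch below; the surrounding variables and the print call are illustrative:
dump, err := f.ForkChoiceDump(ctx)
if err != nil {
    return err
}
for _, n := range dump.ForkChoiceNodes {
    // BlockRoot and ExecutionBlockHash replace the old Root / ExecutionPayload field names.
    fmt.Printf("slot=%d root=%#x validity=%s\n", n.Slot, n.BlockRoot, n.Validity)
}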


@@ -250,7 +250,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
for i, respNode := range respNodes {
require.Equal(t, storeNodes[i].slot, respNode.Slot)
require.DeepEqual(t, storeNodes[i].root[:], respNode.Root)
require.DeepEqual(t, storeNodes[i].root[:], respNode.BlockRoot)
require.Equal(t, storeNodes[i].balance, respNode.Balance)
require.Equal(t, storeNodes[i].weight, respNode.Weight)
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)


@@ -68,7 +68,7 @@ type Getter interface {
HighestReceivedBlockSlot() types.Slot
HighestReceivedBlockRoot() [32]byte
ReceivedBlocksLastEpoch() (uint64, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceResponse, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
VotedFraction(root [32]byte) (uint64, error)
}


@@ -32,6 +32,7 @@ go_library(
"//beacon-chain/monitor:go_default_library",
"//beacon-chain/node/registration:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",


@@ -34,6 +34,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/monitor"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/node/registration"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits"
@@ -94,6 +95,7 @@ type BeaconNode struct {
exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
depositCache *depositcache.DepositCache
proposerIdsCache *cache.ProposerPayloadIDsCache
stateFeed *event.Feed
@@ -171,6 +173,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
exitPool: voluntaryexits.NewPool(),
slashingsPool: slashings.NewPool(),
syncCommitteePool: synccommittee.NewPool(),
blsToExecPool: blstoexec.NewPool(),
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
@@ -596,6 +599,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer) error
blockchain.WithAttestationPool(b.attestationPool),
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
blockchain.WithBLSToExecPool(b.blsToExecPool),
blockchain.WithP2PBroadcaster(b.fetchP2P()),
blockchain.WithStateNotifier(b),
blockchain.WithAttestationService(attService),
@@ -674,6 +678,7 @@ func (b *BeaconNode) registerSyncService() error {
regularsync.WithExitPool(b.exitPool),
regularsync.WithSlashingPool(b.slashingsPool),
regularsync.WithSyncCommsPool(b.syncCommitteePool),
regularsync.WithBlsToExecPool(b.blsToExecPool),
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
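A single pool instance is created in New and handed to both the blockchain and regular-sync services through their functional options, so changes accepted from gossip end up in the same set the proposer later reads. A minimal sketch of that sharing; only the option constructors appear in this diff, everything else is illustrative:
pool := blstoexec.NewPool()
chainOpt := blockchain.WithBLSToExecPool(pool)  // wired in registerBlockchainService
syncOpt := regularsync.WithBlsToExecPool(pool)  // wired in registerSyncService
_, _ = chainOpt, syncOpt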


@@ -11,10 +11,15 @@ go_library(
"//beacon-chain:__subpackages__",
],
deps = [
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/doubly-linked-list:go_default_library",
"//crypto/bls/blst:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
@@ -24,8 +29,15 @@ go_test(
srcs = ["pool_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/ssz:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",


@@ -4,19 +4,25 @@ import (
"math"
"sync"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
doublylinkedlist "github.com/prysmaticlabs/prysm/v3/container/doubly-linked-list"
"github.com/prysmaticlabs/prysm/v3/crypto/bls/blst"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
)
// PoolManager maintains pending and seen BLS-to-execution-change objects.
// This pool is used by proposers to insert BLS-to-execution-change objects into new blocks.
type PoolManager interface {
PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, error)
BLSToExecChangesForInclusion() ([]*ethpb.SignedBLSToExecutionChange, error)
BLSToExecChangesForInclusion(state.BeaconState) ([]*ethpb.SignedBLSToExecutionChange, error)
InsertBLSToExecChange(change *ethpb.SignedBLSToExecutionChange)
MarkIncluded(change *ethpb.SignedBLSToExecutionChange) error
ValidatorExists(idx types.ValidatorIndex) bool
}
// Pool is a concrete implementation of PoolManager.
@@ -57,25 +63,69 @@ func (p *Pool) PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, e
// BLSToExecChangesForInclusion returns objects that are ready for inclusion in a block built on the given state.
// This method will not return more than the block-enforced MaxBlsToExecutionChanges.
func (p *Pool) BLSToExecChangesForInclusion() ([]*ethpb.SignedBLSToExecutionChange, error) {
func (p *Pool) BLSToExecChangesForInclusion(st state.BeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
p.lock.RLock()
defer p.lock.RUnlock()
length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
result := make([]*ethpb.SignedBLSToExecutionChange, length)
result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
node := p.pending.First()
var err error
for i := 0; node != nil && i < length; i++ {
result[i], err = node.Value()
for node != nil && len(result) < length {
change, err := node.Value()
if err != nil {
p.lock.RUnlock()
return nil, err
}
_, err = blocks.ValidateBLSToExecutionChange(st, change)
if err != nil {
logrus.WithError(err).Warning("removing invalid BLSToExecutionChange from pool")
// MarkIncluded removes the invalid change from the pool
p.lock.RUnlock()
if err := p.MarkIncluded(change); err != nil {
return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
}
p.lock.RLock()
} else {
result = append(result, change)
}
node, err = node.Next()
if err != nil {
p.lock.RUnlock()
return nil, err
}
}
return result, nil
p.lock.RUnlock()
if len(result) == 0 {
return result, nil
}
// We now verify the signatures in batches
cSet, err := blocks.BLSChangesSignatureBatch(st, result)
if err != nil {
logrus.WithError(err).Warning("could not get BLSToExecutionChanges signatures")
} else {
ok, err := cSet.Verify()
if err != nil {
logrus.WithError(err).Warning("could not batch verify BLSToExecutionChanges signatures")
} else if ok {
return result, nil
}
}
// Batch signature verification failed, check signatures individually
verified := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
for i, sig := range cSet.Signatures {
signature, err := blst.SignatureFromBytes(sig)
if err != nil {
logrus.WithError(err).Warning("could not get signature from bytes")
continue
}
if !signature.Verify(cSet.PublicKeys[i], cSet.Messages[i][:]) {
logrus.Warning("removing BLSToExecutionChange with invalid signature from pool")
if err := p.MarkIncluded(result[i]); err != nil {
return nil, errors.Wrap(err, "could not mark BLSToExecutionChange as included")
}
} else {
verified = append(verified, result[i])
}
}
return verified, nil
}
// InsertBLSToExecChange inserts an object into the pool.
@@ -107,3 +157,14 @@ func (p *Pool) MarkIncluded(change *ethpb.SignedBLSToExecutionChange) error {
p.pending.Remove(node)
return nil
}
// ValidatorExists checks if the bls to execution change object exists
// for that particular validator.
func (p *Pool) ValidatorExists(idx types.ValidatorIndex) bool {
p.lock.RLock()
defer p.lock.RUnlock()
node := p.m[idx]
return node != nil
}
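Because the pool now takes the head state, invalid or badly signed changes are dropped at read time, so a proposer only ever receives changes that passed ValidateBLSToExecutionChange plus batch (or, on failure, individual) signature verification. A hedged proposer-side sketch; the block-body field name is an assumption, not shown in this diff:
pool := blstoexec.NewPool()
// gossip / API handlers call pool.InsertBLSToExecChange(signedChange) as changes arrive
changes, err := pool.BLSToExecChangesForInclusion(headState) // at most MaxBlsToExecutionChanges, all validated
if err != nil {
    return err
}
blkBody.BlsToExecutionChanges = changes // assumed Capella body field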


@@ -3,8 +3,15 @@ package blstoexec
import (
"testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
state_native "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/crypto/bls/common"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
eth "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
@@ -36,42 +43,89 @@ func TestPendingBLSToExecChanges(t *testing.T) {
}
func TestBLSToExecChangesForInclusion(t *testing.T) {
spb := &eth.BeaconStateCapella{
Fork: &eth.Fork{
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
},
}
numValidators := 2 * params.BeaconConfig().MaxBlsToExecutionChanges
validators := make([]*eth.Validator, numValidators)
blsChanges := make([]*eth.BLSToExecutionChange, numValidators)
spb.Balances = make([]uint64, numValidators)
privKeys := make([]common.SecretKey, numValidators)
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
executionAddress := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13}
for i := range validators {
v := &eth.Validator{}
v.EffectiveBalance = maxEffectiveBalance
v.WithdrawableEpoch = params.BeaconConfig().FarFutureEpoch
v.WithdrawalCredentials = make([]byte, 32)
priv, err := bls.RandKey()
require.NoError(t, err)
privKeys[i] = priv
pubkey := priv.PublicKey().Marshal()
message := &eth.BLSToExecutionChange{
ToExecutionAddress: executionAddress,
ValidatorIndex: types.ValidatorIndex(i),
FromBlsPubkey: pubkey,
}
hashFn := ssz.NewHasherFunc(hash.CustomSHA256Hasher())
digest := hashFn.Hash(pubkey)
digest[0] = params.BeaconConfig().BLSWithdrawalPrefixByte
copy(v.WithdrawalCredentials, digest[:])
validators[i] = v
blsChanges[i] = message
}
spb.Validators = validators
st, err := state_native.InitializeFromProtoCapella(spb)
require.NoError(t, err)
signedChanges := make([]*eth.SignedBLSToExecutionChange, numValidators)
for i, message := range blsChanges {
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, privKeys[i])
require.NoError(t, err)
signed := &eth.SignedBLSToExecutionChange{
Message: message,
Signature: signature,
}
signedChanges[i] = signed
}
t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxBlsToExecutionChanges-1; i++ {
pool.InsertBLSToExecChange(&eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(i),
},
})
}
changes, err := pool.BLSToExecChangesForInclusion()
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges-1), len(changes))
assert.Equal(t, 0, len(changes))
})
t.Run("Less than MaxBlsToExecutionChanges in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxBlsToExecutionChanges-1; i++ {
pool.InsertBLSToExecChange(signedChanges[i])
}
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges)-1, len(changes))
})
t.Run("MaxBlsToExecutionChanges in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxBlsToExecutionChanges; i++ {
pool.InsertBLSToExecChange(&eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(i),
},
})
pool.InsertBLSToExecChange(signedChanges[i])
}
changes, err := pool.BLSToExecChangesForInclusion()
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
})
t.Run("more than MaxBlsToExecutionChanges in pool", func(t *testing.T) {
pool := NewPool()
for i := uint64(0); i < params.BeaconConfig().MaxBlsToExecutionChanges+1; i++ {
pool.InsertBLSToExecChange(&eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(i),
},
})
for i := uint64(0); i < numValidators; i++ {
pool.InsertBLSToExecChange(signedChanges[i])
}
changes, err := pool.BLSToExecChangesForInclusion()
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
// We want FIFO semantics, which means validator with index 16 shouldn't be returned
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
@@ -79,6 +133,30 @@ func TestBLSToExecChangesForInclusion(t *testing.T) {
assert.NotEqual(t, types.ValidatorIndex(16), ch.Message.ValidatorIndex)
}
})
t.Run("One Bad change", func(t *testing.T) {
pool := NewPool()
saveByte := signedChanges[1].Message.FromBlsPubkey[5]
signedChanges[1].Message.FromBlsPubkey[5] = 0xff
for i := uint64(0); i < numValidators; i++ {
pool.InsertBLSToExecChange(signedChanges[i])
}
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges), len(changes))
assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
signedChanges[1].Message.FromBlsPubkey[5] = saveByte
})
t.Run("One Bad Signature", func(t *testing.T) {
pool := NewPool()
copy(signedChanges[1].Signature, signedChanges[2].Signature)
for i := uint64(0); i < numValidators; i++ {
pool.InsertBLSToExecChange(signedChanges[i])
}
changes, err := pool.BLSToExecChangesForInclusion(st)
require.NoError(t, err)
assert.Equal(t, int(params.BeaconConfig().MaxBlsToExecutionChanges)-1, len(changes))
assert.Equal(t, types.ValidatorIndex(2), changes[1].Message.ValidatorIndex)
})
}
func TestInsertBLSToExecChange(t *testing.T) {
@@ -256,3 +334,76 @@ func TestMarkIncluded(t *testing.T) {
assert.NotNil(t, pool.m[1])
})
}
func TestValidatorExists(t *testing.T) {
t.Run("no validators in pool", func(t *testing.T) {
pool := NewPool()
assert.Equal(t, false, pool.ValidatorExists(0))
})
t.Run("validator added to pool", func(t *testing.T) {
pool := NewPool()
change := &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertBLSToExecChange(change)
assert.Equal(t, true, pool.ValidatorExists(0))
})
t.Run("multiple validators added to pool", func(t *testing.T) {
pool := NewPool()
change := &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertBLSToExecChange(change)
change = &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(10),
}}
pool.InsertBLSToExecChange(change)
change = &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(30),
}}
pool.InsertBLSToExecChange(change)
assert.Equal(t, true, pool.ValidatorExists(0))
assert.Equal(t, true, pool.ValidatorExists(10))
assert.Equal(t, true, pool.ValidatorExists(30))
})
t.Run("validator added and then removed", func(t *testing.T) {
pool := NewPool()
change := &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertBLSToExecChange(change)
require.NoError(t, pool.MarkIncluded(change))
assert.Equal(t, false, pool.ValidatorExists(0))
})
t.Run("multiple validators added to pool and removed", func(t *testing.T) {
pool := NewPool()
firstChange := &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(0),
}}
pool.InsertBLSToExecChange(firstChange)
secondChange := &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(10),
}}
pool.InsertBLSToExecChange(secondChange)
thirdChange := &eth.SignedBLSToExecutionChange{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: types.ValidatorIndex(30),
}}
pool.InsertBLSToExecChange(thirdChange)
assert.NoError(t, pool.MarkIncluded(firstChange))
assert.NoError(t, pool.MarkIncluded(thirdChange))
assert.Equal(t, false, pool.ValidatorExists(0))
assert.Equal(t, true, pool.ValidatorExists(10))
assert.Equal(t, false, pool.ValidatorExists(30))
})
}


@@ -174,6 +174,7 @@ go_test(
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/host/blank:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/net/swarm/testing:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",


@@ -41,6 +41,10 @@ func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow b
// InterceptAccept checks whether the incidental inbound connection is allowed.
func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
// Deny all incoming connections before we are ready
if !s.started {
return false
}
if !s.validateDial(n.RemoteMultiaddr()) {
// Allow other goroutines to run in the event
// we receive a large number of junk connections.
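The new guard makes the gater reject every inbound connection until the service has finished starting, which is why the tests below now set s.started explicitly after building the host. A minimal ordering sketch mirroring those tests:
s := &Service{cfg: &Config{MaxPeers: 0}}        // not started yet: InterceptAccept returns false
h, err := libp2p.New(libp2p.ConnectionGater(s)) // inbound dials are refused at this point
if err != nil {
    return err
}
s.host = h
s.started = true // only from here on are inbound connections considered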


@@ -40,6 +40,7 @@ func TestPeer_AtMaxLimit(t *testing.T) {
s.cfg = &Config{MaxPeers: 0}
s.addrFilter, err = configureFilter(&Config{})
require.NoError(t, err)
s.started = true
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
require.NoError(t, err)
s.host = h1
@@ -83,6 +84,7 @@ func TestService_InterceptBannedIP(t *testing.T) {
ip := "212.67.10.122"
multiAddress, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
require.NoError(t, err)
s.started = true
for i := 0; i < ipBurst; i++ {
valid := s.validateDial(multiAddress)
@@ -96,6 +98,37 @@ func TestService_InterceptBannedIP(t *testing.T) {
}
}
func TestService_RejectInboundConnectionBeforeStarted(t *testing.T) {
limit := 1
s := &Service{
ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, 1*time.Second, false),
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: limit,
ScorerParams: &scorers.Config{},
}),
host: mockp2p.NewTestP2P(t).BHost,
cfg: &Config{MaxPeers: uint(limit)},
}
var err error
s.addrFilter, err = configureFilter(&Config{})
require.NoError(t, err)
ip := "212.67.10.122"
multiAddress, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
require.NoError(t, err)
valid := s.InterceptAccept(&maEndpoints{raddr: multiAddress})
if valid {
t.Errorf("Expected multiaddress with ip %s to be rejected as p2p service is not ready", ip)
}
s.started = true
valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
if !valid {
t.Errorf("Expected multiaddress with ip %s to be accepted after service is started", ip)
}
}
func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
limit := 20
s := &Service{
@@ -113,6 +146,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
ip := "212.67.10.122"
multiAddress, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ip, 3000))
require.NoError(t, err)
s.started = true
valid := s.InterceptAccept(&maEndpoints{raddr: multiAddress})
if !valid {
@@ -157,6 +191,7 @@ func TestPeer_BelowMaxLimit(t *testing.T) {
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
require.NoError(t, err)
s.host = h1
s.started = true
defer func() {
err := h1.Close()
require.NoError(t, err)
@@ -202,6 +237,7 @@ func TestPeerAllowList(t *testing.T) {
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
require.NoError(t, err)
s.host = h1
s.started = true
defer func() {
err := h1.Close()
require.NoError(t, err)
@@ -248,6 +284,7 @@ func TestPeerDenyList(t *testing.T) {
h1, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.ConnectionGater(s)}...)
require.NoError(t, err)
s.host = h1
s.started = true
defer func() {
err := h1.Close()
require.NoError(t, err)


@@ -115,7 +115,9 @@ func TestSszNetworkEncoder_DecodeWithMultipleFrames(t *testing.T) {
maxChunkSize := uint64(1 << 22)
encoder.MaxChunkSize = maxChunkSize
params.OverrideBeaconNetworkConfig(c)
_, err := e.EncodeWithMaxLength(buf, st.ToProtoUnsafe().(*ethpb.BeaconState))
pb, err := st.ToProtoUnsafe()
require.NoError(t, err)
_, err = e.EncodeWithMaxLength(buf, pb.(*ethpb.BeaconState))
require.NoError(t, err)
// Max snappy block size
if buf.Len() <= 76490 {


@@ -41,6 +41,9 @@ const (
// voluntaryExitWeight specifies the scoring weight that we apply to
// our voluntary exit topic.
voluntaryExitWeight = 0.05
// blsToExecutionChangeWeight specifies the scoring weight that we apply to
// our bls to execution change topic.
blsToExecutionChangeWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -116,6 +119,8 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultProposerSlashingTopicParams(), nil
case strings.Contains(topic, GossipAttesterSlashingMessage):
return defaultAttesterSlashingTopicParams(), nil
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
return defaultBlsToExecutionChangeTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -473,6 +478,28 @@ func defaultVoluntaryExitTopicParams() *pubsub.TopicScoreParams {
}
}
func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: blsToExecutionChangeWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}
func oneSlotDuration() time.Duration {
return time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
}
@@ -531,7 +558,7 @@ func scoreByWeight(weight, threshold float64) float64 {
func maxScore() float64 {
totalWeight := beaconBlockWeight + aggregateWeight + syncContributionWeight +
attestationTotalWeight + syncCommitteesTotalWeight + attesterSlashingWeight +
proposerSlashingWeight + voluntaryExitWeight
proposerSlashingWeight + voluntaryExitWeight + blsToExecutionChangeWeight
return (maxInMeshScore + maxFirstDeliveryScore) * totalWeight
}
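Registering the weight alone is not enough; the topic also has to be routed through topicScoreParams and counted into maxScore, which is what the other changes in this hunk do. A small lookup sketch; the digest and encoder suffix in the topic string are illustrative:
topic := "/eth2/01020304/bls_to_execution_change/ssz_snappy"
p, err := s.topicScoreParams(topic) // hits the new GossipBlsToExecutionChangeMessage case
if err != nil {
    return err
}
_ = p // p.TopicWeight == blsToExecutionChangeWeight (0.05)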


@@ -20,6 +20,7 @@ var gossipTopicMappings = map[string]proto.Message{
AggregateAndProofSubnetTopicFormat: &ethpb.SignedAggregateAttestationAndProof{},
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
}
// GossipTopicMappings is a function to return the assigned data type
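The mapping is how the sync layer recovers the concrete proto type for a received topic. A hedged decode-side sketch; the unmarshalling step itself is elided:
base := gossipTopicMappings[BlsToExecutionChangeSubnetTopicFormat]
msg, ok := proto.Clone(base).(*ethpb.SignedBLSToExecutionChange)
if !ok {
    return errors.New("unexpected message type registered for bls_to_execution_change")
}
_ = msg // unmarshal the snappy-decoded payload into msg before validation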


@@ -12,6 +12,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/protocol"
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/v3/crypto/ecdsa"
@@ -95,7 +96,7 @@ func TestDefaultMultiplexers(t *testing.T) {
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
assert.NoError(t, err)
assert.Equal(t, "/mplex/6.7.0", cfg.Muxers[0].ID)
assert.Equal(t, "/yamux/1.0.0", cfg.Muxers[1].ID)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[1].ID)
}


@@ -26,6 +26,8 @@ const (
GossipAggregateAndProofMessage = "beacon_aggregate_and_proof"
// GossipContributionAndProofMessage is the name for the sync contribution and proof message type.
GossipContributionAndProofMessage = "sync_committee_contribution_and_proof"
// GossipBlsToExecutionChangeMessage is the name for the bls to execution change message type.
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
// Topic Formats
//
@@ -45,4 +47,6 @@ const (
AggregateAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipAggregateAndProofMessage
// SyncContributionAndProofSubnetTopicFormat is the topic format for the sync aggregate and proof subnet.
SyncContributionAndProofSubnetTopicFormat = GossipProtocolAndDigest + GossipContributionAndProofMessage
// BlsToExecutionChangeSubnetTopicFormat is the topic format for the bls to execution change subnet.
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
)
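BlsToExecutionChangeSubnetTopicFormat is still only a format string; a concrete gossip topic is produced by substituting the fork digest, assuming the usual "/eth2/%x/" placeholder carried by GossipProtocolAndDigest:
digest := [4]byte{0x01, 0x02, 0x03, 0x04} // illustrative fork digest
topic := fmt.Sprintf(BlsToExecutionChangeSubnetTopicFormat, digest)
_ = topic // e.g. "/eth2/01020304/bls_to_execution_change"; the gossip encoder appends its own suffix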


@@ -86,19 +86,19 @@ func wrapAttestationsArray(
return true, nil
}
// Some endpoints e.g. https://ethereum.github.io/beacon-apis/#/Validator/getAttesterDuties expect posting a top-level array.
// Some endpoints e.g. https://ethereum.github.io/beacon-apis/#/Validator/getAttesterDuties expect posting a top-level array of validator indices.
// We make it more proto-friendly by wrapping it in a struct with an 'Index' field.
func wrapValidatorIndicesArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*DutiesRequestJson); ok {
if _, ok := endpoint.PostRequest.(*ValidatorIndicesJson); ok {
indices := make([]string, 0)
if err := json.NewDecoder(req.Body).Decode(&indices); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &DutiesRequestJson{Index: indices}
j := &ValidatorIndicesJson{Index: indices}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
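The rename reflects that the same hook now serves every endpoint that posts a bare array of validator indices (attester duties, sync duties, and the new liveness route). A small sketch of the transformation it performs; the values are illustrative:
raw := []byte(`["1","2"]`) // what the client posts
var indices []string
if err := json.Unmarshal(raw, &indices); err != nil {
    return err
}
wrapped, err := json.Marshal(&ValidatorIndicesJson{Index: indices})
if err != nil {
    return err
}
_ = wrapped // {"index":["1","2"]} — the shape the proto-aware handler expects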
@@ -445,6 +445,12 @@ type bellatrixBlindedBlockResponseJson struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
}
type capellaBlindedBlockResponseJson struct {
Version string `json:"version"`
Data *SignedBlindedBeaconBlockCapellaContainerJson `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BlockV2ResponseJson)
if !ok {
@@ -526,6 +532,15 @@ func serializeBlindedBlock(response interface{}) (apimiddleware.RunDefault, []by
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaBlindedBlockResponseJson{
Version: respContainer.Version,
Data: &SignedBlindedBeaconBlockCapellaContainerJson{
Message: respContainer.Data.CapellaBlock,
Signature: respContainer.Data.Signature,
},
ExecutionOptimistic: respContainer.ExecutionOptimistic,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
@@ -552,6 +567,11 @@ type bellatrixStateResponseJson struct {
Data *BeaconStateBellatrixJson `json:"data"`
}
type capellaStateResponseJson struct {
Version string `json:"version"`
Data *BeaconStateCapellaJson `json:"data"`
}
func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*BeaconStateV2ResponseJson)
if !ok {
@@ -575,6 +595,11 @@ func serializeV2State(response interface{}) (apimiddleware.RunDefault, []byte, a
Version: respContainer.Version,
Data: respContainer.Data.BellatrixState,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaStateResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.CapellaState,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported state version '%s'", respContainer.Version))
}
@@ -673,3 +698,50 @@ func serializeProducedBlindedBlock(response interface{}) (apimiddleware.RunDefau
}
return false, j, nil
}
func prepareForkChoiceResponse(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
dump, ok := response.(*ForkChoiceDumpJson)
if !ok {
return false, nil, apimiddleware.InternalServerError(errors.New("response is not of the correct type"))
}
nodes := make([]*ForkChoiceNodeResponseJson, len(dump.ForkChoiceNodes))
for i, n := range dump.ForkChoiceNodes {
nodes[i] = &ForkChoiceNodeResponseJson{
Slot: n.Slot,
BlockRoot: n.BlockRoot,
ParentRoot: n.ParentRoot,
JustifiedEpoch: n.JustifiedEpoch,
FinalizedEpoch: n.FinalizedEpoch,
Weight: n.Weight,
Validity: n.Validity,
ExecutionBlockHash: n.ExecutionBlockHash,
ExtraData: &ForkChoiceNodeExtraDataJson{
UnrealizedJustifiedEpoch: n.UnrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.UnrealizedFinalizedEpoch,
Balance: n.Balance,
ExecutionOptimistic: n.ExecutionOptimistic,
TimeStamp: n.TimeStamp,
},
}
}
forkChoice := &ForkChoiceResponseJson{
JustifiedCheckpoint: dump.JustifiedCheckpoint,
FinalizedCheckpoint: dump.FinalizedCheckpoint,
ForkChoiceNodes: nodes,
ExtraData: &ForkChoiceResponseExtraDataJson{
BestJustifiedCheckpoint: dump.BestJustifiedCheckpoint,
UnrealizedJustifiedCheckpoint: dump.UnrealizedJustifiedCheckpoint,
UnrealizedFinalizedCheckpoint: dump.UnrealizedFinalizedCheckpoint,
ProposerBoostRoot: dump.ProposerBoostRoot,
PreviousProposerBoostRoot: dump.PreviousProposerBoostRoot,
HeadRoot: dump.HeadRoot,
},
}
result, err := json.Marshal(forkChoice)
if err != nil {
return false, nil, apimiddleware.InternalServerError(errors.New("could not marshal fork choice to JSON"))
}
return false, result, nil
}


@@ -62,7 +62,7 @@ func TestWrapAttestationArray(t *testing.T) {
func TestWrapValidatorIndicesArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &DutiesRequestJson{},
PostRequest: &ValidatorIndicesJson{},
}
unwrappedIndices := []string{"1", "2"}
unwrappedIndicesJson, err := json.Marshal(unwrappedIndices)
@@ -76,7 +76,7 @@ func TestWrapValidatorIndicesArray(t *testing.T) {
runDefault, errJson := wrapValidatorIndicesArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedIndices := &DutiesRequestJson{}
wrappedIndices := &ValidatorIndicesJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedIndices))
require.Equal(t, 2, len(wrappedIndices.Index), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedIndices.Index[0])
@@ -85,7 +85,7 @@ func TestWrapValidatorIndicesArray(t *testing.T) {
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &DutiesRequestJson{},
PostRequest: &ValidatorIndicesJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
@@ -814,6 +814,74 @@ func TestSerializeBlindedBlock(t *testing.T) {
assert.Equal(t, true, resp.ExecutionOptimistic)
})
t.Run("Capella", func(t *testing.T) {
response := &BlindedBlockResponseJson{
Version: ethpbv2.Version_CAPELLA.String(),
Data: &SignedBlindedBeaconBlockContainerJson{
CapellaBlock: &BlindedBeaconBlockCapellaJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BlindedBeaconBlockBodyCapellaJson{
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapellaJson{
ParentHash: "parent_hash",
FeeRecipient: "fee_recipient",
StateRoot: "state_root",
ReceiptsRoot: "receipts_root",
LogsBloom: "logs_bloom",
PrevRandao: "prev_randao",
BlockNumber: "block_number",
GasLimit: "gas_limit",
GasUsed: "gas_used",
TimeStamp: "time_stamp",
ExtraData: "extra_data",
BaseFeePerGas: "base_fee_per_gas",
BlockHash: "block_hash",
TransactionsRoot: "transactions_root",
WithdrawalsRoot: "withdrawals_root",
},
},
},
Signature: "sig",
},
ExecutionOptimistic: true,
}
runDefault, j, errJson := serializeBlindedBlock(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &capellaBlindedBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data.Message)
beaconBlock := resp.Data.Message
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
assert.NotNil(t, beaconBlock.Body)
payloadHeader := beaconBlock.Body.ExecutionPayloadHeader
assert.NotNil(t, payloadHeader)
assert.Equal(t, "parent_hash", payloadHeader.ParentHash)
assert.Equal(t, "fee_recipient", payloadHeader.FeeRecipient)
assert.Equal(t, "state_root", payloadHeader.StateRoot)
assert.Equal(t, "receipts_root", payloadHeader.ReceiptsRoot)
assert.Equal(t, "logs_bloom", payloadHeader.LogsBloom)
assert.Equal(t, "prev_randao", payloadHeader.PrevRandao)
assert.Equal(t, "block_number", payloadHeader.BlockNumber)
assert.Equal(t, "gas_limit", payloadHeader.GasLimit)
assert.Equal(t, "gas_used", payloadHeader.GasUsed)
assert.Equal(t, "time_stamp", payloadHeader.TimeStamp)
assert.Equal(t, "extra_data", payloadHeader.ExtraData)
assert.Equal(t, "base_fee_per_gas", payloadHeader.BaseFeePerGas)
assert.Equal(t, "block_hash", payloadHeader.BlockHash)
assert.Equal(t, "transactions_root", payloadHeader.TransactionsRoot)
assert.Equal(t, "withdrawals_root", payloadHeader.WithdrawalsRoot)
assert.Equal(t, true, resp.ExecutionOptimistic)
})
t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeBlindedBlock(response)
@@ -881,6 +949,21 @@ func TestSerializeV2State(t *testing.T) {
require.NoError(t, json.Unmarshal(j, &bellatrixStateResponseJson{}))
})
t.Run("Capella", func(t *testing.T) {
response := &BeaconStateV2ResponseJson{
Version: ethpbv2.Version_CAPELLA.String(),
Data: &BeaconStateContainerV2Json{
Phase0State: nil,
CapellaState: &BeaconStateCapellaJson{},
},
}
runDefault, j, errJson := serializeV2State(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
require.NoError(t, json.Unmarshal(j, &capellaStateResponseJson{}))
})
t.Run("incorrect response type", func(t *testing.T) {
runDefault, j, errJson := serializeV2State(&types.Empty{})
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
@@ -1119,3 +1202,113 @@ func TestSerializeProduceBlindedBlock(t *testing.T) {
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
})
}
func TestPrepareForkChoiceResponse(t *testing.T) {
dump := &ForkChoiceDumpJson{
JustifiedCheckpoint: &CheckpointJson{
Epoch: "justified",
Root: "justified",
},
FinalizedCheckpoint: &CheckpointJson{
Epoch: "finalized",
Root: "finalized",
},
BestJustifiedCheckpoint: &CheckpointJson{
Epoch: "best_justified",
Root: "best_justified",
},
UnrealizedJustifiedCheckpoint: &CheckpointJson{
Epoch: "unrealized_justified",
Root: "unrealized_justified",
},
UnrealizedFinalizedCheckpoint: &CheckpointJson{
Epoch: "unrealized_finalized",
Root: "unrealized_finalized",
},
ProposerBoostRoot: "proposer_boost_root",
PreviousProposerBoostRoot: "previous_proposer_boost_root",
HeadRoot: "head_root",
ForkChoiceNodes: []*ForkChoiceNodeJson{
{
Slot: "node1_slot",
BlockRoot: "node1_block_root",
ParentRoot: "node1_parent_root",
JustifiedEpoch: "node1_justified_epoch",
FinalizedEpoch: "node1_finalized_epoch",
UnrealizedJustifiedEpoch: "node1_unrealized_justified_epoch",
UnrealizedFinalizedEpoch: "node1_unrealized_finalized_epoch",
Balance: "node1_balance",
Weight: "node1_weight",
ExecutionOptimistic: false,
ExecutionBlockHash: "node1_execution_block_hash",
TimeStamp: "node1_time_stamp",
Validity: "node1_validity",
},
{
Slot: "node2_slot",
BlockRoot: "node2_block_root",
ParentRoot: "node2_parent_root",
JustifiedEpoch: "node2_justified_epoch",
FinalizedEpoch: "node2_finalized_epoch",
UnrealizedJustifiedEpoch: "node2_unrealized_justified_epoch",
UnrealizedFinalizedEpoch: "node2_unrealized_finalized_epoch",
Balance: "node2_balance",
Weight: "node2_weight",
ExecutionOptimistic: true,
ExecutionBlockHash: "node2_execution_block_hash",
TimeStamp: "node2_time_stamp",
Validity: "node2_validity",
},
},
}
runDefault, j, errorJson := prepareForkChoiceResponse(dump)
assert.Equal(t, nil, errorJson)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
result := &ForkChoiceResponseJson{}
require.NoError(t, json.Unmarshal(j, result))
require.NotNil(t, result)
assert.Equal(t, "justified", result.JustifiedCheckpoint.Epoch)
assert.Equal(t, "justified", result.JustifiedCheckpoint.Root)
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Epoch)
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Root)
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Epoch)
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Root)
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Epoch)
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Root)
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Epoch)
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Root)
assert.Equal(t, "proposer_boost_root", result.ExtraData.ProposerBoostRoot)
assert.Equal(t, "previous_proposer_boost_root", result.ExtraData.PreviousProposerBoostRoot)
assert.Equal(t, "head_root", result.ExtraData.HeadRoot)
require.Equal(t, 2, len(result.ForkChoiceNodes))
node1 := result.ForkChoiceNodes[0]
require.NotNil(t, node1)
assert.Equal(t, "node1_slot", node1.Slot)
assert.Equal(t, "node1_block_root", node1.BlockRoot)
assert.Equal(t, "node1_parent_root", node1.ParentRoot)
assert.Equal(t, "node1_justified_epoch", node1.JustifiedEpoch)
assert.Equal(t, "node1_finalized_epoch", node1.FinalizedEpoch)
assert.Equal(t, "node1_unrealized_justified_epoch", node1.ExtraData.UnrealizedJustifiedEpoch)
assert.Equal(t, "node1_unrealized_finalized_epoch", node1.ExtraData.UnrealizedFinalizedEpoch)
assert.Equal(t, "node1_balance", node1.ExtraData.Balance)
assert.Equal(t, "node1_weight", node1.Weight)
assert.Equal(t, false, node1.ExtraData.ExecutionOptimistic)
assert.Equal(t, "node1_execution_block_hash", node1.ExecutionBlockHash)
assert.Equal(t, "node1_time_stamp", node1.ExtraData.TimeStamp)
assert.Equal(t, "node1_validity", node1.Validity)
node2 := result.ForkChoiceNodes[1]
require.NotNil(t, node2)
assert.Equal(t, "node2_slot", node2.Slot)
assert.Equal(t, "node2_block_root", node2.BlockRoot)
assert.Equal(t, "node2_parent_root", node2.ParentRoot)
assert.Equal(t, "node2_justified_epoch", node2.JustifiedEpoch)
assert.Equal(t, "node2_finalized_epoch", node2.FinalizedEpoch)
assert.Equal(t, "node2_unrealized_justified_epoch", node2.ExtraData.UnrealizedJustifiedEpoch)
assert.Equal(t, "node2_unrealized_finalized_epoch", node2.ExtraData.UnrealizedFinalizedEpoch)
assert.Equal(t, "node2_balance", node2.ExtraData.Balance)
assert.Equal(t, "node2_weight", node2.Weight)
assert.Equal(t, true, node2.ExtraData.ExecutionOptimistic)
assert.Equal(t, "node2_execution_block_hash", node2.ExecutionBlockHash)
assert.Equal(t, "node2_time_stamp", node2.ExtraData.TimeStamp)
assert.Equal(t, "node2_validity", node2.Validity)
}


@@ -52,7 +52,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v2/debug/beacon/states/{state_id}",
"/eth/v1/debug/beacon/heads",
"/eth/v2/debug/beacon/heads",
"/eth/v1/debug/beacon/forkchoice",
"/eth/v1/debug/forkchoice",
"/eth/v1/config/fork_schedule",
"/eth/v1/config/deposit_contract",
"/eth/v1/config/spec",
@@ -72,6 +72,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/validator/contribution_and_proofs",
"/eth/v1/validator/prepare_beacon_proposer",
"/eth/v1/validator/register_validator",
"/eth/v1/validator/liveness/{epoch}",
}
}
@@ -196,8 +197,11 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.GetResponse = &ForkChoiceHeadsResponseJson{}
case "/eth/v2/debug/beacon/heads":
endpoint.GetResponse = &V2ForkChoiceHeadsResponseJson{}
case "/eth/v1/debug/beacon/forkchoice":
endpoint.GetResponse = &ForkchoiceResponse{}
case "/eth/v1/debug/forkchoice":
endpoint.GetResponse = &ForkChoiceDumpJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: prepareForkChoiceResponse,
}
case "/eth/v1/config/fork_schedule":
endpoint.GetResponse = &ForkScheduleResponseJson{}
case "/eth/v1/config/deposit_contract":
@@ -207,7 +211,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
case "/eth/v1/events":
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleEvents}
case "/eth/v1/validator/duties/attester/{epoch}":
endpoint.PostRequest = &DutiesRequestJson{}
endpoint.PostRequest = &ValidatorIndicesJson{}
endpoint.PostResponse = &AttesterDutiesResponseJson{}
endpoint.RequestURLLiterals = []string{"epoch"}
endpoint.Err = &NodeSyncDetailsErrorJson{}
@@ -219,7 +223,7 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.RequestURLLiterals = []string{"epoch"}
endpoint.Err = &NodeSyncDetailsErrorJson{}
case "/eth/v1/validator/duties/sync/{epoch}":
endpoint.PostRequest = &DutiesRequestJson{}
endpoint.PostRequest = &ValidatorIndicesJson{}
endpoint.PostResponse = &SyncCommitteeDutiesResponseJson{}
endpoint.RequestURLLiterals = []string{"epoch"}
endpoint.Err = &NodeSyncDetailsErrorJson{}
@@ -287,6 +291,14 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedValidatorRegistrationsArray,
}
case "/eth/v1/validator/liveness/{epoch}":
endpoint.PostRequest = &ValidatorIndicesJson{}
endpoint.PostResponse = &LivenessResponseJson{}
endpoint.RequestURLLiterals = []string{"epoch"}
endpoint.Err = &NodeSyncDetailsErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapValidatorIndicesArray,
}
default:
return nil, errors.New("invalid path")
}
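With the route registered, a client posts a bare index array and receives per-validator liveness flags; ValidatorIndicesJson and LivenessResponseJson above describe both sides. A hedged call sketch (host, port, and epoch are illustrative):
body := bytes.NewBufferString(`["1","2"]`)
resp, err := http.Post("http://localhost:3500/eth/v1/validator/liveness/42", "application/json", body)
if err != nil {
    return err
}
defer func() { _ = resp.Body.Close() }()
liveness := &LivenessResponseJson{}
if err := json.NewDecoder(resp.Body).Decode(liveness); err != nil {
    return err
}
// each entry in liveness.Data exposes Index and IsLive for the requested validators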


@@ -213,7 +213,7 @@ type SpecResponseJson struct {
Data interface{} `json:"data"`
}
type DutiesRequestJson struct {
type ValidatorIndicesJson struct {
Index []string `json:"index"`
}
@@ -290,16 +290,47 @@ type SubmitContributionAndProofsRequestJson struct {
Data []*SignedContributionAndProofJson `json:"data"`
}
type ForkchoiceResponse struct {
JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
HeadRoot string `json:"head_root" hex:"true"`
ForkChoiceNodes []*ForkChoiceNodeJson `json:"forkchoice_nodes"`
type ForkChoiceNodeResponseJson struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root" hex:"true"`
ParentRoot string `json:"parent_root" hex:"true"`
JustifiedEpoch string `json:"justified_epoch"`
FinalizedEpoch string `json:"finalized_epoch"`
Weight string `json:"weight"`
Validity string `json:"validity" enum:"true"`
ExecutionBlockHash string `json:"execution_block_hash" hex:"true"`
ExtraData *ForkChoiceNodeExtraDataJson `json:"extra_data"`
}
type ForkChoiceNodeExtraDataJson struct {
UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
Balance string `json:"balance"`
ExecutionOptimistic bool `json:"execution_optimistic"`
TimeStamp string `json:"timestamp"`
}
type ForkChoiceResponseJson struct {
JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
ForkChoiceNodes []*ForkChoiceNodeResponseJson `json:"fork_choice_nodes"`
ExtraData *ForkChoiceResponseExtraDataJson `json:"extra_data"`
}
type ForkChoiceResponseExtraDataJson struct {
BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
HeadRoot string `json:"head_root" hex:"true"`
}
type LivenessResponseJson struct {
Data []*struct {
Index string `json:"index"`
IsLive bool `json:"is_live"`
} `json:"data"`
}
//----------------
@@ -350,6 +381,7 @@ type SignedBlindedBeaconBlockContainerJson struct {
Phase0Block *BeaconBlockJson `json:"phase0_block"`
AltairBlock *BeaconBlockAltairJson `json:"altair_block"`
BellatrixBlock *BlindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
CapellaBlock *BlindedBeaconBlockCapellaJson `json:"capella_block"`
Signature string `json:"signature" hex:"true"`
}
@@ -380,6 +412,11 @@ type SignedBlindedBeaconBlockBellatrixContainerJson struct {
Signature string `json:"signature" hex:"true"`
}
type SignedBlindedBeaconBlockCapellaContainerJson struct {
Message *BlindedBeaconBlockCapellaJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type BeaconBlockAltairJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
@@ -404,6 +441,14 @@ type BlindedBeaconBlockBellatrixJson struct {
Body *BlindedBeaconBlockBodyBellatrixJson `json:"body"`
}
type BlindedBeaconBlockCapellaJson struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
Body *BlindedBeaconBlockBodyCapellaJson `json:"body"`
}
type BeaconBlockBodyAltairJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
@@ -442,6 +487,20 @@ type BlindedBeaconBlockBodyBellatrixJson struct {
ExecutionPayloadHeader *ExecutionPayloadHeaderJson `json:"execution_payload_header"`
}
type BlindedBeaconBlockBodyCapellaJson struct {
RandaoReveal string `json:"randao_reveal" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Graffiti string `json:"graffiti" hex:"true"`
ProposerSlashings []*ProposerSlashingJson `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingJson `json:"attester_slashings"`
Attestations []*AttestationJson `json:"attestations"`
Deposits []*DepositJson `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExitJson `json:"voluntary_exits"`
SyncAggregate *SyncAggregateJson `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"execution_payload_header"`
BLSToExecutionChanges []*BLSToExecutionChangeJson `json:"bls_to_execution_changes"`
}
type ExecutionPayloadJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
@@ -476,6 +535,24 @@ type ExecutionPayloadHeaderJson struct {
TransactionsRoot string `json:"transactions_root" hex:"true"`
}
type ExecutionPayloadHeaderCapellaJson struct {
ParentHash string `json:"parent_hash" hex:"true"`
FeeRecipient string `json:"fee_recipient" hex:"true"`
StateRoot string `json:"state_root" hex:"true"`
ReceiptsRoot string `json:"receipts_root" hex:"true"`
LogsBloom string `json:"logs_bloom" hex:"true"`
PrevRandao string `json:"prev_randao" hex:"true"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
TimeStamp string `json:"timestamp"`
ExtraData string `json:"extra_data" hex:"true"`
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
BlockHash string `json:"block_hash" hex:"true"`
TransactionsRoot string `json:"transactions_root" hex:"true"`
WithdrawalsRoot string `json:"withdrawals_root" hex:"true"`
}
type SyncAggregateJson struct {
SyncCommitteeBits string `json:"sync_committee_bits" hex:"true"`
SyncCommitteeSignature string `json:"sync_committee_signature" hex:"true"`
@@ -546,6 +623,12 @@ type AttestationDataJson struct {
Target *CheckpointJson `json:"target"`
}
type BLSToExecutionChangeJson struct {
ValidatorIndex string `json:"validator_index"`
FromBLSPubkey string `json:"from_bls_pubkey" hex:"true"`
ToExecutionAddress string `json:"to_execution_address" hex:"true"`
}
type DepositJson struct {
Proof []string `json:"proof" hex:"true"`
Data *Deposit_DataJson `json:"data"`
@@ -679,10 +762,41 @@ type BeaconStateBellatrixJson struct {
LatestExecutionPayloadHeader *ExecutionPayloadHeaderJson `json:"latest_execution_payload_header"`
}
type BeaconStateCapellaJson struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
Slot string `json:"slot"`
Fork *ForkJson `json:"fork"`
LatestBlockHeader *BeaconBlockHeaderJson `json:"latest_block_header"`
BlockRoots []string `json:"block_roots" hex:"true"`
StateRoots []string `json:"state_roots" hex:"true"`
HistoricalRoots []string `json:"historical_roots" hex:"true"`
Eth1Data *Eth1DataJson `json:"eth1_data"`
Eth1DataVotes []*Eth1DataJson `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*ValidatorJson `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes" hex:"true"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation EpochParticipation `json:"previous_epoch_participation"`
CurrentEpochParticipation EpochParticipation `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits" hex:"true"`
PreviousJustifiedCheckpoint *CheckpointJson `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *CheckpointJson `json:"current_justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommitteeJson `json:"current_sync_committee"`
NextSyncCommittee *SyncCommitteeJson `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderCapellaJson `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
}
type BeaconStateContainerV2Json struct {
Phase0State *BeaconStateJson `json:"phase0_state"`
AltairState *BeaconStateAltairJson `json:"altair_state"`
BellatrixState *BeaconStateBellatrixJson `json:"bellatrix_state"`
CapellaState *BeaconStateCapellaJson `json:"capella_state"`
}
type ForkJson struct {
@@ -823,7 +937,7 @@ type SignedValidatorRegistrationsRequestJson struct {
type ForkChoiceNodeJson struct {
Slot string `json:"slot"`
Root string `json:"root" hex:"true"`
BlockRoot string `json:"block_root" hex:"true"`
ParentRoot string `json:"parent_root" hex:"true"`
JustifiedEpoch string `json:"justified_epoch"`
FinalizedEpoch string `json:"finalized_epoch"`
@@ -832,8 +946,21 @@ type ForkChoiceNodeJson struct {
Balance string `json:"balance"`
Weight string `json:"weight"`
ExecutionOptimistic bool `json:"execution_optimistic"`
ExecutionPayload string `json:"execution_payload" hex:"true"`
ExecutionBlockHash string `json:"execution_block_hash" hex:"true"`
TimeStamp string `json:"timestamp"`
Validity string `json:"validity" enum:"true"`
}
type ForkChoiceDumpJson struct {
JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
HeadRoot string `json:"head_root" hex:"true"`
ForkChoiceNodes []*ForkChoiceNodeJson `json:"fork_choice_nodes"`
}
//----------------


@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"blinded_blocks.go",
"blocks.go",
"config.go",
"log.go",
@@ -69,6 +70,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"blinded_blocks_test.go",
"blocks_test.go",
"config_test.go",
"init_test.go",


@@ -0,0 +1,246 @@
package beacon
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v3/proto/migration"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// GetBlindedBlock retrieves blinded block for given block id.
func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.BlindedBlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlock")
defer span.End()
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
}
result, err := getBlindedBlockPhase0(blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = getBlindedBlockAltair(blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = bs.getBlindedBlockBellatrix(ctx, blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
result, err = bs.getBlindedBlockCapella(ctx, blk)
if result != nil {
return result, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
func getBlindedBlockPhase0(blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
phase0Blk, err := blk.PbPhase0Block()
if err != nil {
return nil, err
}
if phase0Blk == nil {
return nil, errNilBlock
}
v1Blk, err := migration.SignedBeaconBlock(blk)
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Blk.Block},
Signature: v1Blk.Signature,
},
ExecutionOptimistic: false,
}, nil
}
func getBlindedBlockAltair(blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
altairBlk, err := blk.PbAltairBlock()
if err != nil {
return nil, err
}
if altairBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: false,
}, nil
}
// getBlindedBlockBellatrix converts a Bellatrix block to its blinded representation, handling blocks that are already blinded, and reports whether the block is execution-optimistic.
func (bs *Server) getBlindedBlockBellatrix(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
bellatrixBlk, err := blk.PbBellatrixBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, blocks.ErrUnsupportedGetter) {
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
return nil, err
}
return nil, err
}
if bellatrixBlk == nil {
return nil, errNilBlock
}
blindedBlkInterface, err := blk.ToBlinded()
if err != nil {
return nil, errors.Wrapf(err, "could not convert block to blinded block")
}
blindedBellatrixBlock, err := blindedBlkInterface.PbBlindedBellatrixBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlock.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
// getBlindedBlockCapella converts a Capella block to its blinded representation, handling blocks that are already blinded, and reports whether the block is execution-optimistic.
func (bs *Server) getBlindedBlockCapella(ctx context.Context, blk interfaces.SignedBeaconBlock) (*ethpbv2.BlindedBlockResponse, error) {
capellaBlk, err := blk.PbCapellaBlock()
if err != nil {
// ErrUnsupportedGetter means that we have another block type
if errors.Is(err, blocks.ErrUnsupportedGetter) {
if blindedCapellaBlk, err := blk.PbBlindedCapellaBlock(); err == nil {
if blindedCapellaBlk == nil {
return nil, errNilBlock
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlk.Block)
if err != nil {
return nil, errors.Wrapf(err, "Could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_CAPELLA,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
return nil, err
}
return nil, err
}
if capellaBlk == nil {
return nil, errNilBlock
}
blindedBlkInterface, err := blk.ToBlinded()
if err != nil {
return nil, errors.Wrapf(err, "could not convert block to blinded block")
}
blindedCapellaBlock, err := blindedBlkInterface.PbBlindedCapellaBlock()
if err != nil {
return nil, errors.Wrapf(err, "could not get signed beacon block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(blindedCapellaBlock.Block)
if err != nil {
return nil, errors.Wrapf(err, "could not convert beacon block")
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrapf(err, "could not get block root")
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, errors.Wrapf(err, "could not check if block is optimistic")
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_CAPELLA,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock{CapellaBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
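GetBlindedBlock above tries each fork's converter in order and only treats blocks.ErrUnsupportedGetter as a signal to fall through to the next one; any other error is surfaced immediately. A minimal, self-contained sketch of that sentinel-error dispatch pattern, with hypothetical names rather than the Prysm types:

package main

import (
	"errors"
	"fmt"
)

// errUnsupported stands in for blocks.ErrUnsupportedGetter in this sketch.
var errUnsupported = errors.New("unsupported getter")

type response struct{ version string }

// Each converter either handles the block or reports errUnsupported so the
// caller can fall through to the next fork's converter.
func asPhase0(kind string) (*response, error) {
	if kind != "phase0" {
		return nil, errUnsupported
	}
	return &response{version: "PHASE0"}, nil
}

func asAltair(kind string) (*response, error) {
	if kind != "altair" {
		return nil, errUnsupported
	}
	return &response{version: "ALTAIR"}, nil
}

func getBlock(kind string) (*response, error) {
	for _, convert := range []func(string) (*response, error){asPhase0, asAltair} {
		resp, err := convert(kind)
		if resp != nil {
			return resp, nil
		}
		// The sentinel only means "not my fork, try the next converter";
		// any other error is fatal.
		if !errors.Is(err, errUnsupported) {
			return nil, err
		}
	}
	return nil, fmt.Errorf("unknown block kind %q", kind)
}

func main() {
	resp, err := getBlock("altair")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.version) // ALTAIR
}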


@@ -0,0 +1,594 @@
package beacon
import (
"context"
"fmt"
"reflect"
"testing"
mock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
executionTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v3/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v3/proto/migration"
ethpbalpha "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
)
func TestServer_GetBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetPhase0Block().Block.Slot + 1
b2 := util.NewBeaconBlock()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBeaconBlock()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
b4 := util.NewBeaconBlock()
b4.Block.Slot = nextSlot
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
util.SaveBlock(t, ctx, beaconDB, b4)
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBeaconBlock
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
require.NoError(t, err)
phase0Block, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
require.Equal(t, true, ok)
if !reflect.DeepEqual(phase0Block.Phase0Block, v1Block.Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_PHASE0, blk.Version)
})
}
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetAltairBlock().Block.Slot + 1
b2 := util.NewBeaconBlockAltair()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBeaconBlockAltair()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
b4 := util.NewBeaconBlockAltair()
b4.Block.Slot = nextSlot
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
util.SaveBlock(t, ctx, beaconDB, b4)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBeaconBlockAltair
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].GetAltairBlock(),
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].GetAltairBlock(),
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.GetAltairBlock(),
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].GetAltairBlock(),
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].GetAltairBlock(),
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
require.NoError(t, err)
altairBlock, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
require.Equal(t, true, ok)
if !reflect.DeepEqual(altairBlock.AltairBlock, v2Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_ALTAIR, blk.Version)
})
}
})
t.Run("Bellatrix", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetBlindedBellatrixBlock().Block.Slot + 1
b2 := util.NewBlindedBeaconBlockBellatrix()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBlindedBeaconBlockBellatrix()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
b4 := util.NewBlindedBeaconBlockBellatrix()
b4.Block.Slot = nextSlot
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
util.SaveBlock(t, ctx, beaconDB, b4)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
ExecutionPayloadReconstructor: &executionTest.EngineClient{
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
},
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBlindedBeaconBlockBellatrix
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].GetBlindedBellatrixBlock(),
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].GetBlindedBellatrixBlock(),
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.GetBlindedBellatrixBlock(),
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].GetBlindedBellatrixBlock(),
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].GetBlindedBellatrixBlock(),
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
v2Block, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(tt.want.Block)
require.NoError(t, err)
b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
require.Equal(t, true, ok)
if !reflect.DeepEqual(b.BellatrixBlock, v2Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_BELLATRIX, blk.Version)
})
}
})
t.Run("Capella", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocksCapellaBlinded(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetBlindedCapellaBlock().Block.Slot + 1
b2 := util.NewBlindedBeaconBlockCapella()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBlindedBeaconBlockCapella()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
b4 := util.NewBlindedBeaconBlockCapella()
b4.Block.Slot = nextSlot
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
util.SaveBlock(t, ctx, beaconDB, b4)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedCapellaBlock())
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
ExecutionPayloadReconstructor: &executionTest.EngineClient{
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
},
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBlindedBeaconBlockCapella
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].GetBlindedCapellaBlock(),
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].GetBlindedCapellaBlock(),
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.GetBlindedCapellaBlock(),
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].GetBlindedCapellaBlock(),
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].GetBlindedCapellaBlock(),
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
v2Block, err := migration.V1Alpha1BeaconBlockBlindedCapellaToV2Blinded(tt.want.Block)
require.NoError(t, err)
b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_CapellaBlock)
require.Equal(t, true, ok)
if !reflect.DeepEqual(b.CapellaBlock, v2Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_CAPELLA, blk.Version)
})
}
})
t.Run("execution optimistic", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
_, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
b2 := util.NewBeaconBlockBellatrix()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBeaconBlockBellatrix()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBellatrixBlock())
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
})
require.NoError(t, err)
assert.Equal(t, true, blk.ExecutionOptimistic)
})
}
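Every fork subtest above shares the same table-driven shape: each case names a block ID, an expected block, and a wantErr flag, and error cases return before the deep-equality comparison runs. A stripped-down sketch of that structure, with illustrative names only:

package beacon_test

import (
	"errors"
	"testing"
)

var errBadID = errors.New("could not parse block ID")

// lookupBlock is a stand-in for calling GetBlindedBlock in the real tests.
func lookupBlock(id string) (string, error) {
	if id == "3bad0" {
		return "", errBadID
	}
	return "block-" + id, nil
}

func TestBlockIDTable(t *testing.T) {
	tests := []struct {
		name    string
		blockID string
		want    string
		wantErr bool
	}{
		{name: "slot", blockID: "30", want: "block-30"},
		{name: "bad formatting", blockID: "3bad0", wantErr: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := lookupBlock(tt.blockID)
			if tt.wantErr {
				// Error cases stop here, mirroring the early return in the tests above.
				if err == nil {
					t.Fatal("expected an error")
				}
				return
			}
			if err != nil {
				t.Fatal(err)
			}
			if got != tt.want {
				t.Errorf("got %q, want %q", got, tt.want)
			}
		})
	}
}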


@@ -34,6 +34,10 @@ import (
const versionHeader = "eth-consensus-version"
var (
errNilBlock = errors.New("nil block")
)
// blockIdParseError represents an error scenario where a block ID could not be parsed.
type blockIdParseError struct {
message string
@@ -684,138 +688,6 @@ func (bs *Server) GetBlockSSZV2(ctx context.Context, req *ethpbv2.BlockRequestV2
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
// GetBlindedBlock retrieves blinded block for given block id.
func (bs *Server) GetBlindedBlock(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.BlindedBlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlock")
defer span.End()
blk, err := bs.blockFromBlockID(ctx, req.BlockId)
err = handleGetBlockError(blk, err)
if err != nil {
return nil, err
}
phase0Blk, err := blk.PbPhase0Block()
if err == nil {
if phase0Blk == nil {
return nil, status.Error(codes.Internal, "Nil block")
}
v1Blk, err := migration.SignedBeaconBlock(blk)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Blk.Block},
Signature: v1Blk.Signature,
},
ExecutionOptimistic: false,
}, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
altairBlk, err := blk.PbAltairBlock()
if err == nil {
if altairBlk == nil {
return nil, status.Error(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: false,
}, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
bellatrixBlk, err := blk.PbBellatrixBlock()
if err == nil {
if bellatrixBlk == nil {
return nil, status.Error(codes.Internal, "Nil block")
}
blindedBlkInterface, err := blk.ToBlinded()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert block to blinded block: %v", err)
}
blindedBellatrixBlock, err := blindedBlkInterface.PbBlindedBellatrixBlock()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlock.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
if blindedBellatrixBlk, err := blk.PbBlindedBellatrixBlock(); err == nil {
if blindedBellatrixBlk == nil {
return nil, status.Error(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(blindedBellatrixBlk.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
sig := blk.Signature()
return &ethpbv2.BlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: v2Blk},
Signature: sig[:],
},
ExecutionOptimistic: isOptimistic,
}, nil
}
// ErrUnsupportedGetter means that we have another block type
if !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return nil, status.Errorf(codes.Internal, "Unknown block type %T", blk)
}
// GetBlindedBlockSSZ returns the SSZ-serialized version of the blinded beacon block for given block id.
func (bs *Server) GetBlindedBlockSSZ(ctx context.Context, req *ethpbv1.BlockRequest) (*ethpbv2.SSZContainer, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetBlindedBlockSSZ")


@@ -200,6 +200,48 @@ func fillDBTestBlocksBellatrixBlinded(ctx context.Context, t *testing.T, beaconD
return genBlk, blkContainers
}
// fillDBTestBlocksCapellaBlinded saves a blinded Capella genesis block plus 100 blinded Capella blocks to the test database and returns the genesis block together with the block containers.
func fillDBTestBlocksCapellaBlinded(ctx context.Context, t *testing.T, beaconDB db.Database) (*ethpbalpha.SignedBlindedBeaconBlockCapella, []*ethpbalpha.BeaconBlockContainer) {
parentRoot := [32]byte{1, 2, 3}
genBlk := util.NewBlindedBeaconBlockCapella()
genBlk.Block.ParentRoot = parentRoot[:]
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, genBlk)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
count := types.Slot(100)
blks := make([]interfaces.SignedBeaconBlock, count)
blkContainers := make([]*ethpbalpha.BeaconBlockContainer, count)
for i := types.Slot(0); i < count; i++ {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = i
b.Block.ParentRoot = bytesutil.PadTo([]byte{uint8(i)}, 32)
att1 := util.NewAttestation()
att1.Data.Slot = i
att1.Data.CommitteeIndex = types.CommitteeIndex(i)
att2 := util.NewAttestation()
att2.Data.Slot = i
att2.Data.CommitteeIndex = types.CommitteeIndex(i + 1)
b.Block.Body.Attestations = []*ethpbalpha.Attestation{att1, att2}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
signedB, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
blks[i] = signedB
blkContainers[i] = &ethpbalpha.BeaconBlockContainer{
Block: &ethpbalpha.BeaconBlockContainer_BlindedCapellaBlock{BlindedCapellaBlock: b}, BlockRoot: root[:]}
}
require.NoError(t, beaconDB.SaveBlocks(ctx, blks))
headRoot := bytesutil.ToBytes32(blkContainers[len(blks)-1].BlockRoot)
summary := &ethpbalpha.StateSummary{
Root: headRoot[:],
Slot: blkContainers[len(blks)-1].Block.(*ethpbalpha.BeaconBlockContainer_BlindedCapellaBlock).BlindedCapellaBlock.Block.Slot,
}
require.NoError(t, beaconDB.SaveStateSummary(ctx, summary))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
return genBlk, blkContainers
}
func TestServer_GetBlockHeader(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
@@ -1542,444 +1584,6 @@ func TestServer_GetBlockV2(t *testing.T) {
})
}
func TestServer_GetBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocks(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetPhase0Block().Block.Slot + 1
b2 := util.NewBeaconBlock()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBeaconBlock()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
b4 := util.NewBeaconBlock()
b4.Block.Slot = nextSlot
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
util.SaveBlock(t, ctx, beaconDB, b4)
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: wsb,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBeaconBlock
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
require.NoError(t, err)
phase0Block, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
require.Equal(t, true, ok)
if !reflect.DeepEqual(phase0Block.Phase0Block, v1Block.Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_PHASE0, blk.Version)
})
}
})
t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocksAltair(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetAltairBlock().Block.Slot + 1
b2 := util.NewBeaconBlockAltair()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBeaconBlockAltair()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
b4 := util.NewBeaconBlockAltair()
b4.Block.Slot = nextSlot
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
util.SaveBlock(t, ctx, beaconDB, b4)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetAltairBlock())
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBeaconBlockAltair
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].GetAltairBlock(),
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].GetAltairBlock(),
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.GetAltairBlock(),
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].GetAltairBlock(),
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].GetAltairBlock(),
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
require.NoError(t, err)
altairBlock, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
require.Equal(t, true, ok)
if !reflect.DeepEqual(altairBlock.AltairBlock, v2Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_ALTAIR, blk.Version)
})
}
})
t.Run("Bellatrix", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
genBlk, blkContainers := fillDBTestBlocksBellatrixBlinded(ctx, t, beaconDB)
canonicalRoots := make(map[[32]byte]bool)
for _, bContr := range blkContainers {
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
}
headBlock := blkContainers[len(blkContainers)-1]
nextSlot := headBlock.GetBlindedBellatrixBlock().Block.Slot + 1
b2 := util.NewBlindedBeaconBlockBellatrix()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBlindedBeaconBlockBellatrix()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
b4 := util.NewBlindedBeaconBlockBellatrix()
b4.Block.Slot = nextSlot
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
util.SaveBlock(t, ctx, beaconDB, b4)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBlindedBellatrixBlock())
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
CanonicalRoots: canonicalRoots,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
ExecutionPayloadReconstructor: &executionTest.EngineClient{
ExecutionPayloadByBlockHash: map[[32]byte]*enginev1.ExecutionPayload{},
},
}
root, err := genBlk.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
name string
blockID []byte
want *ethpbalpha.SignedBlindedBeaconBlockBellatrix
wantErr bool
}{
{
name: "slot",
blockID: []byte("30"),
want: blkContainers[30].GetBlindedBellatrixBlock(),
},
{
name: "bad formatting",
blockID: []byte("3bad0"),
wantErr: true,
},
{
name: "canonical",
blockID: []byte("30"),
want: blkContainers[30].GetBlindedBellatrixBlock(),
},
{
name: "non canonical",
blockID: []byte(fmt.Sprintf("%d", nextSlot)),
wantErr: true,
},
{
name: "head",
blockID: []byte("head"),
want: headBlock.GetBlindedBellatrixBlock(),
},
{
name: "finalized",
blockID: []byte("finalized"),
want: blkContainers[64].GetBlindedBellatrixBlock(),
},
{
name: "genesis",
blockID: []byte("genesis"),
want: genBlk,
},
{
name: "genesis root",
blockID: root[:],
want: genBlk,
},
{
name: "root",
blockID: blkContainers[20].BlockRoot,
want: blkContainers[20].GetBlindedBellatrixBlock(),
},
{
name: "non-existent root",
blockID: bytesutil.PadTo([]byte("hi there"), 32),
wantErr: true,
},
{
name: "no block",
blockID: []byte("105"),
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: tt.blockID,
})
if tt.wantErr {
require.NotEqual(t, err, nil)
return
}
require.NoError(t, err)
v2Block, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(tt.want.Block)
require.NoError(t, err)
b, ok := blk.Data.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
require.Equal(t, true, ok)
if !reflect.DeepEqual(b.BellatrixBlock, v2Block) {
t.Error("Expected blocks to equal")
}
assert.Equal(t, ethpbv2.Version_BELLATRIX, blk.Version)
})
}
})
t.Run("execution optimistic", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
_, blkContainers := fillDBTestBlocksBellatrix(ctx, t, beaconDB)
headBlock := blkContainers[len(blkContainers)-1]
b2 := util.NewBeaconBlockBellatrix()
b2.Block.Slot = 30
b2.Block.ParentRoot = bytesutil.PadTo([]byte{1}, 32)
util.SaveBlock(t, ctx, beaconDB, b2)
b3 := util.NewBeaconBlockBellatrix()
b3.Block.Slot = 30
b3.Block.ParentRoot = bytesutil.PadTo([]byte{4}, 32)
util.SaveBlock(t, ctx, beaconDB, b3)
chainBlk, err := blocks.NewSignedBeaconBlock(headBlock.GetBellatrixBlock())
require.NoError(t, err)
mockChainService := &mock.ChainService{
DB: beaconDB,
Block: chainBlk,
Root: headBlock.BlockRoot,
FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
Optimistic: true,
}
bs := &Server{
BeaconDB: beaconDB,
ChainInfoFetcher: mockChainService,
HeadFetcher: mockChainService,
OptimisticModeFetcher: mockChainService,
}
blk, err := bs.GetBlindedBlock(ctx, &ethpbv1.BlockRequest{
BlockId: []byte("head"),
})
require.NoError(t, err)
assert.Equal(t, true, blk.ExecutionOptimistic)
})
}
func TestServer_GetBlockSSZ(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()


@@ -492,8 +492,9 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
newBs, err := bs.Copy()
require.NoError(t, err)
newBs, err = transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
@@ -659,8 +660,9 @@ func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
newBs, err := bs.Copy()
require.NoError(t, err)
newBs, err = transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
@@ -787,8 +789,9 @@ func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
Signature: make([]byte, 96),
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
newBs, err := bs.Copy()
require.NoError(t, err)
newBs, err = transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
require.NoError(t, err)
sb, err := signing.ComputeDomainAndSign(newBs, exit.Message.Epoch, exit.Message, params.BeaconConfig().DomainVoluntaryExit, keys[0])


@@ -83,6 +83,18 @@ func (ds *Server) GetBeaconStateV2(ctx context.Context, req *ethpbv2.BeaconState
},
ExecutionOptimistic: isOptimistic,
}, nil
case version.Capella:
protoState, err := migration.BeaconStateCapellaToProto(beaconSt)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert state to proto: %v", err)
}
return &ethpbv2.BeaconStateResponseV2{
Version: ethpbv2.Version_CAPELLA,
Data: &ethpbv2.BeaconStateContainer{
State: &ethpbv2.BeaconStateContainer_CapellaState{CapellaState: protoState},
},
ExecutionOptimistic: isOptimistic,
}, nil
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
@@ -110,6 +122,8 @@ func (ds *Server) GetBeaconStateSSZV2(ctx context.Context, req *ethpbv2.BeaconSt
ver = ethpbv2.Version_ALTAIR
case version.Bellatrix:
ver = ethpbv2.Version_BELLATRIX
case version.Capella:
ver = ethpbv2.Version_CAPELLA
default:
return nil, status.Error(codes.Internal, "Unsupported state version")
}
@@ -142,6 +156,6 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
}
// GetForkChoice returns a dump of the fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceResponse, error) {
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceDump, error) {
return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
}
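Capella support in the debug endpoints above is largely a matter of extending the fork switch with one more case. A self-contained sketch of that version-to-enum mapping, using local stand-ins rather than the Prysm version constants or the ethpbv2 package:

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins for the runtime state versions and the eth/v2 Version enum.
const (
	phase0 = iota
	altair
	bellatrix
	capella
)

func stateVersionToAPI(v int) (string, error) {
	switch v {
	case phase0:
		return "PHASE0", nil
	case altair:
		return "ALTAIR", nil
	case bellatrix:
		return "BELLATRIX", nil
	case capella: // the case added in this diff
		return "CAPELLA", nil
	default:
		return "", errors.New("unsupported state version")
	}
}

func main() {
	v, err := stateVersionToAPI(capella)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // CAPELLA
}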


@@ -76,6 +76,23 @@ func TestGetBeaconStateV2(t *testing.T) {
assert.NotNil(t, resp)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
})
t.Run("Capella", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateCapella(t, 1)
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
HeadFetcher: &blockchainmock.ChainService{},
OptimisticModeFetcher: &blockchainmock.ChainService{},
BeaconDB: db,
}
resp, err := server.GetBeaconStateV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
})
t.Run("execution optimistic", func(t *testing.T) {
parentRoot := [32]byte{'a'}
blk := util.NewBeaconBlock()
@@ -182,6 +199,25 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
})
t.Run("Capella", func(t *testing.T) {
fakeState, _ := util.DeterministicGenesisStateCapella(t, 1)
sszState, err := fakeState.MarshalSSZ()
require.NoError(t, err)
server := &Server{
StateFetcher: &testutil.MockFetcher{
BeaconState: fakeState,
},
}
resp, err := server.GetBeaconStateSSZV2(context.Background(), &ethpbv2.BeaconStateRequestV2{
StateId: make([]byte, 0),
})
require.NoError(t, err)
assert.NotNil(t, resp)
assert.DeepEqual(t, sszState, resp.Data)
assert.Equal(t, ethpbv2.Version_CAPELLA, resp.Version)
})
}
func TestListForkChoiceHeadsV2(t *testing.T) {

Some files were not shown because too many files have changed in this diff.